[hbase-filesystem] branch master updated: HBASE-27042. Remove S3Guard awareness from HBoss

2022-05-23 Thread elserj
This is an automated email from the ASF dual-hosted git repository.

elserj pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hbase-filesystem.git


The following commit(s) were added to refs/heads/master by this push:
 new 8ba3a91  HBASE-27042. Remove S3Guard awareness from HBoss
8ba3a91 is described below

commit 8ba3a9138d0d9fee8cba7a21c8ee542eb2ffe490
Author: Steve Loughran 
AuthorDate: Mon May 16 14:22:29 2022 +0100

HBASE-27042. Remove S3Guard awareness from HBoss

* Cut HBoss test code which would explicitly disable S3Guard
  - obsolete and breaks the build on forthcoming Hadoop releases.
* Also cut all references in docs and XML.
* EmbeddedS3 to implement deleteObject() and so support
  AWS SDKs 1.11.1026+, which expect this (Hadoop 3.3.2+).
* Maven test runs to provide full stack traces on failures,
  so runtime failures can be debugged and fixed.

Change-Id: Iab10cfc6780cea064ea116efc1c177332d780514
Signed-off-by: Josh Elser 
---
 .../main/java/org/apache/hadoop/hbase/oss/EmbeddedS3.java|  6 ++
 hbase-oss/README.md  | 12 +---
 .../src/test/java/org/apache/hadoop/hbase/oss/TestUtils.java |  5 -
 hbase-oss/src/test/resources/core-site.xml   | 12 
 pom.xml  |  1 +
 5 files changed, 12 insertions(+), 24 deletions(-)

diff --git a/hadoop-testutils/src/main/java/org/apache/hadoop/hbase/oss/EmbeddedS3.java b/hadoop-testutils/src/main/java/org/apache/hadoop/hbase/oss/EmbeddedS3.java
index 2284a6d..864b661 100644
--- a/hadoop-testutils/src/main/java/org/apache/hadoop/hbase/oss/EmbeddedS3.java
+++ b/hadoop-testutils/src/main/java/org/apache/hadoop/hbase/oss/EmbeddedS3.java
@@ -23,6 +23,7 @@ import com.amazonaws.services.s3.AbstractAmazonS3;
 import com.amazonaws.services.s3.model.Bucket;
 import com.amazonaws.services.s3.model.CopyObjectRequest;
 import com.amazonaws.services.s3.model.CopyObjectResult;
+import com.amazonaws.services.s3.model.DeleteObjectRequest;
 import com.amazonaws.services.s3.model.DeleteObjectsRequest;
 import com.amazonaws.services.s3.model.DeleteObjectsResult;
 import com.amazonaws.services.s3.model.GetObjectMetadataRequest;
@@ -145,6 +146,11 @@ public class EmbeddedS3 {
   bucket.remove(key);
 }
 
+public void deleteObject(DeleteObjectRequest request) {
+  LOG.debug("deleteObject: {}", request);
+  bucket.remove(request.getKey());
+}
+
 public DeleteObjectsResult deleteObjects(DeleteObjectsRequest request) {
   for (DeleteObjectsRequest.KeyVersion keyVersion : request.getKeys()) {
 String key = keyVersion.getKey();
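
For context on the hunk above: newer v1 AWS SDKs issue single-object deletes through the request-object overload, so a test client exercises the new method roughly as in this sketch (bucket and key names are illustrative):

    import com.amazonaws.services.s3.AmazonS3;
    import com.amazonaws.services.s3.model.DeleteObjectRequest;

    // Sketch only: EmbeddedS3's bucket-backed client extends AbstractAmazonS3,
    // so the SDK-side delete resolves to the deleteObject(DeleteObjectRequest)
    // overload added above rather than hitting an unimplemented stub.
    public class DeleteObjectSketch {
      static void deleteViaRequest(AmazonS3 s3) {
        s3.deleteObject(new DeleteObjectRequest("test-bucket", "some/key"));
      }
    }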
diff --git a/hbase-oss/README.md b/hbase-oss/README.md
index 9beb529..99c3c3d 100644
--- a/hbase-oss/README.md
+++ b/hbase-oss/README.md
@@ -74,19 +74,17 @@ You may also want to configure:
 
 An implementation based on Amazon's DynamoDB lock library was considered but
 was not completed due to the lack of an efficient way to traverse the tree and
-discover locks on child nodes. The benefit is that S3Guard is required for s3a
-use and as such there's a dependency on DynamoDB anyway.
+discover locks on child nodes.
 
 ## Storage Implementations
 
 Currently HBOSS is primarily designed for and exclusively tested with Hadoop's
-s3a client against Amazon S3. *S3Guard must be enabled, which is available in
-Hadoop 2.9.0, 3.0.0, and higher*.
+s3a client against Amazon S3.
 
-Both the use of S3Guard and Zookeeper for locking (i.e. Zookeeper) have
+The use of Zookeeper for locking has
 implications for other clients that are not configured to share the same
-metadata store and Zookeeper ensemble. Ideally, all clients should be have the
-same configuration in these respects. Read-only clients may not share these
+Zookeeper ensemble. Ideally, all clients should have the
+same configuration. Read-only clients may not share these
 resources with the HBase processes, but they will not have the added safety
 provided by these features. Clients that do not share these resources and modify
 data can compromise the correctness of HBase.
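
As the README text above notes, every client that modifies data must share the same Zookeeper ensemble. A hedged sketch of what that shared client configuration could look like; the fs.hboss.* key names follow the HBOSS README and are assumptions here, not taken from this diff:

    import org.apache.hadoop.conf.Configuration;

    public class HbossSharedConfigSketch {
      static Configuration sharedLockingConf() {
        Configuration conf = new Configuration();
        // Assumed keys (per the HBOSS README): all writers must agree on the
        // lock implementation and on the Zookeeper ensemble backing it.
        conf.set("fs.hboss.sync.impl", "org.apache.hadoop.hbase.oss.sync.ZKTreeLockManager");
        conf.set("fs.hboss.sync.zk.connectionString", "zk1:2181,zk2:2181,zk3:2181");
        return conf;
      }
    }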
diff --git a/hbase-oss/src/test/java/org/apache/hadoop/hbase/oss/TestUtils.java b/hbase-oss/src/test/java/org/apache/hadoop/hbase/oss/TestUtils.java
index f2e7011..b283715 100644
--- a/hbase-oss/src/test/java/org/apache/hadoop/hbase/oss/TestUtils.java
+++ b/hbase-oss/src/test/java/org/apache/hadoop/hbase/oss/TestUtils.java
@@ -19,7 +19,6 @@
 package org.apache.hadoop.hbase.oss;
 
 import static org.apache.hadoop.fs.s3a.Constants.S3_CLIENT_FACTORY_IMPL;
-import static org.apache.hadoop.fs.s3a.Constants.S3_METADATA_STORE_IMPL;
 import static org.apache.hadoop.hbase.oss.Constants.DATA_URI;
 
 import java.net.URI;
@@ -29,7 +28,6 @@ import java.net.URL;
 import org.apache.commons.lang3.StringUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hado

[hbase] branch HBASE-26553 updated: HBASE-26667 Integrate user-experience for hbase-client (#4064)

2022-03-25 Thread elserj
This is an automated email from the ASF dual-hosted git repository.

elserj pushed a commit to branch HBASE-26553
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/HBASE-26553 by this push:
 new d94c472  HBASE-26667 Integrate user-experience for hbase-client (#4064)
d94c472 is described below

commit d94c472ef51da3c7913a195e219f8c23265789b3
Author: Andor Molnár 
AuthorDate: Fri Mar 25 23:21:04 2022 +0100

HBASE-26667 Integrate user-experience for hbase-client (#4064)

Signed-off-by: Josh Elser 
---
 .../hadoop/hbase/client/ConnectionFactory.java |  18 +++-
 .../OAuthBearerSaslAuthenticationProvider.java |   2 +-
 .../provider/OAuthBearerSaslProviderSelector.java  |   5 +-
 .../hbase/security/token/OAuthBearerTokenUtil.java |  47 -
 .../security/token/TestOAuthBearerTokenUtil.java   | 115 +
 .../security/oauthbearer/OAuthBearerUtils.java |   1 +
 6 files changed, 181 insertions(+), 7 deletions(-)

diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionFactory.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionFactory.java
index a3cf557..0351c58 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionFactory.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionFactory.java
@@ -19,7 +19,6 @@
 package org.apache.hadoop.hbase.client;
 
 import static org.apache.hadoop.hbase.util.FutureUtils.addListener;
-
 import java.io.IOException;
 import java.lang.reflect.Constructor;
 import java.security.PrivilegedExceptionAction;
@@ -30,6 +29,7 @@ import org.apache.hadoop.hbase.AuthUtil;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.security.UserProvider;
+import org.apache.hadoop.hbase.security.token.OAuthBearerTokenUtil;
 import org.apache.hadoop.hbase.trace.TraceUtil;
 import org.apache.hadoop.hbase.util.FutureUtils;
 import org.apache.hadoop.hbase.util.ReflectionUtils;
@@ -70,7 +70,11 @@ import org.apache.yetus.audience.InterfaceAudience;
 @InterfaceAudience.Public
 public class ConnectionFactory {
 
-  public static final String HBASE_CLIENT_ASYNC_CONNECTION_IMPL = "hbase.client.async.connection.impl";
+  public static final String HBASE_CLIENT_ASYNC_CONNECTION_IMPL =
+"hbase.client.async.connection.impl";
+
+  /** Environment variable for OAuth Bearer token */
+  public static final String ENV_OAUTHBEARER_TOKEN = "HBASE_JWT";
 
   /** No public c.tors */
   protected ConnectionFactory() {
@@ -214,6 +218,11 @@ public class ConnectionFactory {
*/
   public static Connection createConnection(Configuration conf, ExecutorService pool,
       final User user) throws IOException {
+
+    if (System.getenv().containsKey(ENV_OAUTHBEARER_TOKEN)) {
+      OAuthBearerTokenUtil.addTokenFromEnvironmentVar(user, System.getenv(ENV_OAUTHBEARER_TOKEN));
+}
+
     Class<?> clazz = conf.getClass(ConnectionUtils.HBASE_CLIENT_CONNECTION_IMPL,
       ConnectionOverAsyncConnection.class, Connection.class);
 if (clazz != ConnectionOverAsyncConnection.class) {
@@ -293,6 +302,11 @@ public class ConnectionFactory {
       future.completeExceptionally(new IOException("clusterid came back null"));
   return;
 }
+
+    if (System.getenv().containsKey(ENV_OAUTHBEARER_TOKEN)) {
+      OAuthBearerTokenUtil.addTokenFromEnvironmentVar(user, System.getenv(ENV_OAUTHBEARER_TOKEN));
+}
+
     Class<?> clazz = conf.getClass(HBASE_CLIENT_ASYNC_CONNECTION_IMPL,
       AsyncConnectionImpl.class, AsyncConnection.class);
 try {
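
With the two hunks above in place, token pickup is automatic for any client started with the HBASE_JWT environment variable set; a minimal usage sketch under that assumption:

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class JwtConnectionSketch {
      public static void main(String[] args) throws IOException {
        // Assumes the process was launched with HBASE_JWT set, e.g.
        //   HBASE_JWT=<token> java JwtConnectionSketch
        // createConnection() then attaches the OAuth Bearer token to the User.
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf)) {
          System.out.println("connected: " + !connection.isClosed());
        }
      }
    }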
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/OAuthBearerSaslAuthenticationProvider.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/OAuthBearerSaslAuthenticationProvider.java
index 8b4dcfe..315ce98 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/OAuthBearerSaslAuthenticationProvider.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/OAuthBearerSaslAuthenticationProvider.java
@@ -17,7 +17,7 @@
  */
 package org.apache.hadoop.hbase.security.provider;
 
-import static org.apache.hadoop.hbase.security.token.OAuthBearerTokenUtil.TOKEN_KIND;
+import static org.apache.hadoop.hbase.security.oauthbearer.OAuthBearerUtils.TOKEN_KIND;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.yetus.audience.InterfaceAudience;
 
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/OAuthBearerSaslProviderSelector.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/OAuthBearerSaslProviderSelector.java
index 88c2eed..bfd7d8a 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/OAuthBearerSaslProviderSelecto

[hbase] branch master updated: HBASE-26767 Rest server should not use a large Header Cache.

2022-02-23 Thread elserj
This is an automated email from the ASF dual-hosted git repository.

elserj pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/master by this push:
 new 2d76b04  HBASE-26767 Rest server should not use a large Header Cache.
2d76b04 is described below

commit 2d76b04197ac1e32917d5e04b48b592240fe7124
Author: Sergey Soldatov 
AuthorDate: Wed Feb 23 15:29:37 2022 -0500

HBASE-26767 Rest server should not use a large Header Cache.

Closes #4123

Co-authored-by: Tak Lon (Stephen) Wu 
Signed-off-by: Tak Lon (Stephen) Wu 
Signed-off-by: Josh Elser 
---
 .../src/main/java/org/apache/hadoop/hbase/rest/RESTServer.java  | 6 +-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServer.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServer.java
index d584cdd..3911d2d 100644
--- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServer.java
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServer.java
@@ -94,6 +94,8 @@ public class RESTServer implements Constants {
   static final String REST_CSRF_METHODS_TO_IGNORE_DEFAULT = "GET,OPTIONS,HEAD,TRACE";
   public static final String SKIP_LOGIN_KEY = "hbase.rest.skip.login";
   static final int DEFAULT_HTTP_MAX_HEADER_SIZE = 64 * 1024; // 64k
+  static final String HTTP_HEADER_CACHE_SIZE = "hbase.rest.http.header.cache.size";
+  static final int DEFAULT_HTTP_HEADER_CACHE_SIZE = Character.MAX_VALUE -1;
 
   private static final String PATH_SPEC_ANY = "/*";
 
@@ -291,10 +293,12 @@ public class RESTServer implements Constants {
 
 String host = servlet.getConfiguration().get("hbase.rest.host", "0.0.0.0");
     int servicePort = servlet.getConfiguration().getInt("hbase.rest.port", 8080);
+    int httpHeaderCacheSize = servlet.getConfiguration().getInt(HTTP_HEADER_CACHE_SIZE,
+      DEFAULT_HTTP_HEADER_CACHE_SIZE);
 HttpConfiguration httpConfig = new HttpConfiguration();
 httpConfig.setSecureScheme("https");
 httpConfig.setSecurePort(servicePort);
-httpConfig.setHeaderCacheSize(DEFAULT_HTTP_MAX_HEADER_SIZE);
+httpConfig.setHeaderCacheSize(httpHeaderCacheSize);
 httpConfig.setRequestHeaderSize(DEFAULT_HTTP_MAX_HEADER_SIZE);
 httpConfig.setResponseHeaderSize(DEFAULT_HTTP_MAX_HEADER_SIZE);
 httpConfig.setSendServerVersion(false);
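
Since the cache size is now read from configuration, operators can shrink it explicitly; a sketch of doing so programmatically (the 4 KiB value is only an example, not a recommendation):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class HeaderCacheConfigSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Key name comes from the patch above; default is Character.MAX_VALUE - 1.
        conf.setInt("hbase.rest.http.header.cache.size", 4 * 1024);
        System.out.println(conf.getInt("hbase.rest.http.header.cache.size", -1));
      }
    }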


[hbase] branch branch-2 updated: HBASE-26767 Rest server should not use a large Header Cache.

2022-02-23 Thread elserj
This is an automated email from the ASF dual-hosted git repository.

elserj pushed a commit to branch branch-2
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2 by this push:
 new e1131a2  HBASE-26767 Rest server should not use a large Header Cache.
e1131a2 is described below

commit e1131a2608f66972dceaf2a152ab77c6e8b0a3cf
Author: Sergey Soldatov 
AuthorDate: Wed Feb 23 15:29:37 2022 -0500

HBASE-26767 Rest server should not use a large Header Cache.

Closes #4123

Co-authored-by: Tak Lon (Stephen) Wu 
Signed-off-by: Tak Lon (Stephen) Wu 
Signed-off-by: Josh Elser 
---
 .../src/main/java/org/apache/hadoop/hbase/rest/RESTServer.java  | 6 +-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServer.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServer.java
index 9ceef8c..4806b6d 100644
--- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServer.java
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServer.java
@@ -94,6 +94,8 @@ public class RESTServer implements Constants {
   static final String REST_CSRF_METHODS_TO_IGNORE_DEFAULT = "GET,OPTIONS,HEAD,TRACE";
   public static final String SKIP_LOGIN_KEY = "hbase.rest.skip.login";
   static final int DEFAULT_HTTP_MAX_HEADER_SIZE = 64 * 1024; // 64k
+  static final String HTTP_HEADER_CACHE_SIZE = "hbase.rest.http.header.cache.size";
+  static final int DEFAULT_HTTP_HEADER_CACHE_SIZE = Character.MAX_VALUE -1;
 
   private static final String PATH_SPEC_ANY = "/*";
 
@@ -294,10 +296,12 @@ public class RESTServer implements Constants {
 
 String host = servlet.getConfiguration().get("hbase.rest.host", "0.0.0.0");
     int servicePort = servlet.getConfiguration().getInt("hbase.rest.port", 8080);
+    int httpHeaderCacheSize = servlet.getConfiguration().getInt(HTTP_HEADER_CACHE_SIZE,
+      DEFAULT_HTTP_HEADER_CACHE_SIZE);
 HttpConfiguration httpConfig = new HttpConfiguration();
 httpConfig.setSecureScheme("https");
 httpConfig.setSecurePort(servicePort);
-httpConfig.setHeaderCacheSize(DEFAULT_HTTP_MAX_HEADER_SIZE);
+httpConfig.setHeaderCacheSize(httpHeaderCacheSize);
 httpConfig.setRequestHeaderSize(DEFAULT_HTTP_MAX_HEADER_SIZE);
 httpConfig.setResponseHeaderSize(DEFAULT_HTTP_MAX_HEADER_SIZE);
 httpConfig.setSendServerVersion(false);


[hbase] branch branch-2.4 updated: HBASE-26767 Rest server should not use a large Header Cache.

2022-02-23 Thread elserj
This is an automated email from the ASF dual-hosted git repository.

elserj pushed a commit to branch branch-2.4
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2.4 by this push:
 new 6b10e24  HBASE-26767 Rest server should not use a large Header Cache.
6b10e24 is described below

commit 6b10e242c998a56c2a6149774462b634c9a03dbc
Author: Sergey Soldatov 
AuthorDate: Wed Feb 23 15:29:37 2022 -0500

HBASE-26767 Rest server should not use a large Header Cache.

Closes #4123

Co-authored-by: Tak Lon (Stephen) Wu 
Signed-off-by: Tak Lon (Stephen) Wu 
Signed-off-by: Josh Elser 
---
 .../src/main/java/org/apache/hadoop/hbase/rest/RESTServer.java  | 6 +-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServer.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServer.java
index dfcb8e9..40dfa90 100644
--- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServer.java
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServer.java
@@ -93,6 +93,8 @@ public class RESTServer implements Constants {
   static final String REST_CSRF_METHODS_TO_IGNORE_DEFAULT = "GET,OPTIONS,HEAD,TRACE";
   public static final String SKIP_LOGIN_KEY = "hbase.rest.skip.login";
   static final int DEFAULT_HTTP_MAX_HEADER_SIZE = 64 * 1024; // 64k
+  static final String HTTP_HEADER_CACHE_SIZE = "hbase.rest.http.header.cache.size";
+  static final int DEFAULT_HTTP_HEADER_CACHE_SIZE = Character.MAX_VALUE -1;
 
   private static final String PATH_SPEC_ANY = "/*";
 
@@ -293,10 +295,12 @@ public class RESTServer implements Constants {
 
 String host = servlet.getConfiguration().get("hbase.rest.host", "0.0.0.0");
     int servicePort = servlet.getConfiguration().getInt("hbase.rest.port", 8080);
+    int httpHeaderCacheSize = servlet.getConfiguration().getInt(HTTP_HEADER_CACHE_SIZE,
+      DEFAULT_HTTP_HEADER_CACHE_SIZE);
 HttpConfiguration httpConfig = new HttpConfiguration();
 httpConfig.setSecureScheme("https");
 httpConfig.setSecurePort(servicePort);
-httpConfig.setHeaderCacheSize(DEFAULT_HTTP_MAX_HEADER_SIZE);
+httpConfig.setHeaderCacheSize(httpHeaderCacheSize);
 httpConfig.setRequestHeaderSize(DEFAULT_HTTP_MAX_HEADER_SIZE);
 httpConfig.setResponseHeaderSize(DEFAULT_HTTP_MAX_HEADER_SIZE);
 httpConfig.setSendServerVersion(false);


[hbase-filesystem] branch master updated: HBASE-26236 Simple travis build for hbase-filesystem (#28)

2022-01-24 Thread elserj
This is an automated email from the ASF dual-hosted git repository.

elserj pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hbase-filesystem.git


The following commit(s) were added to refs/heads/master by this push:
 new 9d3d800  HBASE-26236 Simple travis build for hbase-filesystem (#28)
9d3d800 is described below

commit 9d3d800b6f8ba4542ca808dfadf9c12bdb9e2766
Author: Peter Somogyi 
AuthorDate: Tue Jan 25 02:40:28 2022 +0100

HBASE-26236 Simple travis build for hbase-filesystem (#28)

Co-authored-by: Josh Elser 
---
 .travis.yml | 36 
 1 file changed, 36 insertions(+)

diff --git a/.travis.yml b/.travis.yml
new file mode 100644
index 000..3a86f63
--- /dev/null
+++ b/.travis.yml
@@ -0,0 +1,36 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+language: java
+jdk:
+  - openjdk8
+  - openjdk11
+env:
+  - HADOOP_PROFILE=3.2
+  - HADOOP_PROFILE=3.3
+dist: xenial
+os: linux
+jobs:
+  fast_finish: true
+script:
+  - "mvn clean install -B -V -Dhadoop.profile=${HADOOP_PROFILE}"
+branches:
+  only:
+- master
+git:
+  depth: 1
+cache:
+  directories:
+- $HOME/.m2


[hbase-operator-tools] branch master updated: HBASE-26687 Avoid the newBuilder(RegionInfo) constructor in RegionInf… (#103)

2022-01-19 Thread elserj
This is an automated email from the ASF dual-hosted git repository.

elserj pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hbase-operator-tools.git


The following commit(s) were added to refs/heads/master by this push:
 new b7d9fc8  HBASE-26687 Avoid the newBuilder(RegionInfo) constructor in RegionInf… (#103)
b7d9fc8 is described below

commit b7d9fc8a5f96517f2c6ebe733afa52155d7e3cba
Author: Josh Elser 
AuthorDate: Wed Jan 19 19:55:07 2022 -0500

HBASE-26687 Avoid the newBuilder(RegionInfo) constructor in RegionInf… (#103)

A previously-fixed bug in HBase could break this tool: the RegionInfo
rebuilt by the tool was still incorrect because the region name and
encoded region name were not recomputed. Thankfully, the tool's sanity
check prevented any damage from being done to hbase:meta.

Signed-off-by: Peter Somogyi 
---
 .../src/main/java/org/apache/hbase/RegionInfoMismatchTool.java | 10 +-
 1 file changed, 9 insertions(+), 1 deletion(-)

diff --git a/hbase-hbck2/src/main/java/org/apache/hbase/RegionInfoMismatchTool.java b/hbase-hbck2/src/main/java/org/apache/hbase/RegionInfoMismatchTool.java
index bc37423..494191e 100644
--- a/hbase-hbck2/src/main/java/org/apache/hbase/RegionInfoMismatchTool.java
+++ b/hbase-hbck2/src/main/java/org/apache/hbase/RegionInfoMismatchTool.java
@@ -148,9 +148,17 @@ public class RegionInfoMismatchTool {
 }
     // Third component of a region name is just a literal numeric (not a binary-encoded long)
     long regionId = Long.parseLong(Bytes.toString(regionNameParts[2]));
-    RegionInfo correctedRegionInfo = RegionInfoBuilder.newBuilder(wrongRegionInfo)
+    // HBASE-24500: We cannot use newBuilder(RegionInfo) because it will copy the NAME and
+    // encodedName from the original RegionInfo instead of re-computing it. Copy all of the
+    // fields by hand which will force the new RegionInfo to recompute the NAME/encodedName
+    // fields.
+    RegionInfo correctedRegionInfo = RegionInfoBuilder.newBuilder(wrongRegionInfo.getTable())
         .setRegionId(regionId)
+        .setStartKey(wrongRegionInfo.getStartKey())
+        .setEndKey(wrongRegionInfo.getEndKey())
         .setReplicaId(0)
+        .setOffline(wrongRegionInfo.isOffline())
+        .setSplit(wrongRegionInfo.isSplit())
         .build();

     String rowkeyEncodedRegionName = HBCKRegionInfo.encodeRegionName(regionName);


[hbase-operator-tools] branch master updated: HBASE-26656 Utility to correct corrupt RegionInfo's in hbase:meta

2022-01-12 Thread elserj
This is an automated email from the ASF dual-hosted git repository.

elserj pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hbase-operator-tools.git


The following commit(s) were added to refs/heads/master by this push:
 new e3a8f96  HBASE-26656 Utility to correct corrupt RegionInfo's in hbase:meta
e3a8f96 is described below

commit e3a8f96d0b9e9b985d7f6f2952aaa929047c5b08
Author: Josh Elser 
AuthorDate: Tue Dec 14 18:02:38 2021 -0500

HBASE-26656 Utility to correct corrupt RegionInfo's in hbase:meta

A standalone utility which corrects hbase:meta given the problem
described by HBASE-23328. Includes the ability to both "report" corrupt
regions and correct them. This tool will ensure that other
HBCK2 utilities continue to work without additional modification.

Signed-off-by: Peter Somogyi 

Closes #102
---
 .../src/main/java/org/apache/hbase/HBCK2.java  |  55 
 .../main/java/org/apache/hbase/HBCKRegionInfo.java | 150 ++
 .../org/apache/hbase/RegionInfoMismatchTool.java   | 182 +
 .../apache/hbase/TestRegionInfoMismatchTool.java   | 302 +
 4 files changed, 689 insertions(+)

diff --git a/hbase-hbck2/src/main/java/org/apache/hbase/HBCK2.java b/hbase-hbck2/src/main/java/org/apache/hbase/HBCK2.java
index a788a83..84dc834 100644
--- a/hbase-hbck2/src/main/java/org/apache/hbase/HBCK2.java
+++ b/hbase-hbck2/src/main/java/org/apache/hbase/HBCK2.java
@@ -107,6 +107,7 @@ public class HBCK2 extends Configured implements org.apache.hadoop.util.Tool {
   private static final String RECOVER_UNKNOWN = "recoverUnknown";
   private static final String GENERATE_TABLE_INFO = "generateMissingTableDescriptorFile";
   private static final String FIX_META = "fixMeta";
+  private static final String REGIONINFO_MISMATCH = "regionInfoMismatch";
   // TODO update this map in case of the name of a method changes in Hbck interface
   //  in org.apache.hadoop.hbase.client package. Or a new command is added and the hbck command
   //  does not equals to the method name in Hbck interface.
@@ -422,6 +423,23 @@ public class HBCK2 extends Configured implements org.apache.hadoop.util.Tool {
 return hbck.scheduleSCPsForUnknownServers();
   }
 
+  /**
+   * Runs the RegionInfoMismatchTool using CLI options.
+   */
+  void regionInfoMismatch(String[] args) throws Exception {
+// CLI Options
+Options options = new Options();
+    Option dryRunOption = Option.builder("f").longOpt("fix").hasArg(false).build();
+options.addOption(dryRunOption);
+// Parse command-line.
+CommandLineParser parser = new DefaultParser();
+CommandLine commandLine = parser.parse(options, args, false);
+final boolean fix = commandLine.hasOption(dryRunOption.getOpt());
+try (ClusterConnection connection = connect()) {
+  new RegionInfoMismatchTool(connection).run(fix);
+}
+  }
+
   private HBaseProtos.ServerName parseServerName(String serverName) {
 ServerName sn = ServerName.parseServerName(serverName);
 return HBaseProtos.ServerName.newBuilder().setHostName(sn.getHostname()).
@@ -472,6 +490,8 @@ public class HBCK2 extends Configured implements org.apache.hadoop.util.Tool {
 writer.println();
 usageUnassigns(writer);
 writer.println();
+usageRegioninfoMismatch(writer);
+writer.println();
 writer.close();
 return sw.toString();
   }
@@ -728,6 +748,27 @@ public class HBCK2 extends Configured implements org.apache.hadoop.util.Tool {
 writer.println("   hbase:meta tool. See the HBCK2 README for how to use.");
   }
 
+  private static void usageRegioninfoMismatch(PrintWriter writer) {
+    writer.println(" " + REGIONINFO_MISMATCH);
+    writer.println("   Options:");
+    writer.println("   -f,--fix Update hbase:meta with the corrections");
+    writer.println("   It is recommended to first run this utility without the fix");
+    writer.println("   option to ensure that the utility is generating the correct");
+    writer.println("   serialized RegionInfo data structures. Inspect the output to");
+    writer.println("   confirm that the hbase:meta rowkey matches the new RegionInfo.");
+    writer.println();
+    writer.println("   This tool will read hbase:meta and report any regions whose rowkey");
+    writer.println("   and cell value differ in their encoded region name. HBASE-23328 ");
+    writer.println("   illustrates a problem for read-replica enabled tables in which ");
+    writer.println("   the encoded region name (the MD5 hash) does not match between ");
+    writer.println("   the rowkey and the value. This problem is generally harmless ");
+    writer.println("   for normal operation, but can break other HBCK2
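
A hedged sketch of driving the tool programmatically, mirroring the regionInfoMismatch() method shown earlier (obtaining the ClusterConnection is left to the caller, as HBCK2's connect() helper does):

    import org.apache.hadoop.hbase.client.ClusterConnection;
    import org.apache.hbase.RegionInfoMismatchTool;

    public class RegionInfoMismatchSketch {
      // Report first with fix=false, then re-run with fix=true once the printed
      // RegionInfo structures look correct, as the usage text above advises.
      static void reportThenFix(ClusterConnection connection) throws Exception {
        new RegionInfoMismatchTool(connection).run(false); // report only
        new RegionInfoMismatchTool(connection).run(true);  // apply corrections
      }
    }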

[hbase] 15/16: HBASE-26286: Add support for specifying store file tracker when restoring or cloning snapshot

2021-12-22 Thread elserj
This is an automated email from the ASF dual-hosted git repository.

elserj pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hbase.git

commit 771e552cf4a6436176de06eb033c6de6ee545c71
Author: BukrosSzabolcs 
AuthorDate: Wed Dec 15 20:09:03 2021 -0500

HBASE-26286: Add support for specifying store file tracker when restoring or cloning snapshot

Closes #3851

Signed-off-by: Duo Zhang 
Signed-off-by: Josh Elser 
---
 .../java/org/apache/hadoop/hbase/client/Admin.java | 44 --
 .../hadoop/hbase/client/AdminOverAsyncAdmin.java   |  7 ++-
 .../org/apache/hadoop/hbase/client/AsyncAdmin.java | 14 -
 .../hadoop/hbase/client/AsyncHBaseAdmin.java   |  6 +-
 .../hbase/client/ColumnFamilyDescriptor.java   |  5 ++
 .../client/ColumnFamilyDescriptorBuilder.java  |  6 ++
 .../hadoop/hbase/client/RawAsyncHBaseAdmin.java| 24 +---
 .../src/main/protobuf/server/master/Master.proto   |  1 +
 .../protobuf/server/master/MasterProcedure.proto   |  1 +
 .../org/apache/hadoop/hbase/master/HMaster.java| 25 
 .../hadoop/hbase/master/MasterRpcServices.java |  2 +-
 .../master/procedure/CloneSnapshotProcedure.java   | 54 +++-
 .../master/procedure/RestoreSnapshotProcedure.java |  7 ++-
 .../hbase/master/snapshot/SnapshotManager.java | 27 +---
 .../storefiletracker/StoreFileTrackerFactory.java  | 39 +++-
 .../hbase/snapshot/RestoreSnapshotHelper.java  | 11 ++--
 .../TestCloneSnapshotFromClientCustomSFT.java  | 71 ++
 .../storefiletracker/TestStoreFileTracker.java |  2 -
 .../TestStoreFileTrackerFactory.java   | 52 
 .../hbase/rsgroup/VerifyingRSGroupAdmin.java   |  5 +-
 hbase-shell/src/main/ruby/hbase/admin.rb   |  4 +-
 hbase-shell/src/main/ruby/hbase_constants.rb   |  1 +
 .../src/main/ruby/shell/commands/clone_snapshot.rb |  6 +-
 .../hadoop/hbase/thrift2/client/ThriftAdmin.java   |  4 +-
 24 files changed, 357 insertions(+), 61 deletions(-)

diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
index 48893cc..6c36660 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
@@ -69,6 +69,7 @@ import org.apache.hadoop.hbase.util.Pair;
 import org.apache.yetus.audience.InterfaceAudience;
 
 import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList;
+import org.apache.yetus.audience.InterfaceStability;
 
 /**
 * The administrative API for HBase. Obtain an instance from {@link Connection#getAdmin()} and
@@ -1620,7 +1621,7 @@ public interface Admin extends Abortable, Closeable {
    * @throws IllegalArgumentException if the restore request is formatted incorrectly
    */
   void restoreSnapshot(String snapshotName, boolean takeFailSafeSnapshot, boolean restoreAcl)
-      throws IOException, RestoreSnapshotException;
+    throws IOException, RestoreSnapshotException;
 
   /**
* Create a new table by cloning the snapshot content.
@@ -1633,7 +1634,25 @@ public interface Admin extends Abortable, Closeable {
*/
   default void cloneSnapshot(String snapshotName, TableName tableName)
   throws IOException, TableExistsException, RestoreSnapshotException {
-cloneSnapshot(snapshotName, tableName, false);
+cloneSnapshot(snapshotName, tableName, false, null);
+  }
+
+  /**
+   * Create a new table by cloning the snapshot content.
+   * @param snapshotName name of the snapshot to be cloned
+   * @param tableName name of the table where the snapshot will be restored
+   * @param restoreAcl true to clone acl into newly created table
+   * @param customSFT specify the StoreFileTracker used for the table
+   * @throws IOException if a remote or network exception occurs
+   * @throws TableExistsException if table to be created already exists
+   * @throws RestoreSnapshotException if snapshot failed to be cloned
+   * @throws IllegalArgumentException if the specified table has not a valid name
+   */
+  default void cloneSnapshot(String snapshotName, TableName tableName, boolean restoreAcl,
+    String customSFT)
+    throws IOException, TableExistsException, RestoreSnapshotException {
+    get(cloneSnapshotAsync(snapshotName, tableName, restoreAcl, customSFT), getSyncWaitTimeout(),
+      TimeUnit.MILLISECONDS);
   }
 
   /**
@@ -1680,8 +1699,25 @@ public interface Admin extends Abortable, Closeable {
* @throws RestoreSnapshotException if snapshot failed to be cloned
    * @throws IllegalArgumentException if the specified table has not a valid name
    */
-  Future<Void> cloneSnapshotAsync(String snapshotName, TableName tableName, boolean restoreAcl)
-      throws IOException, TableExistsException, RestoreSnapshotException;
+  default Future<Void> cloneSnapshotAsync(String snapshotName, TableName tableName
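
The new default overload above is the public entry point for this feature; a minimal usage sketch (snapshot and table names are illustrative):

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;

    public class CloneWithSftSketch {
      // Clones a snapshot into a new table whose store files are tracked by
      // the FILE implementation instead of the cluster default.
      static void cloneWithFileTracker(Admin admin) throws Exception {
        admin.cloneSnapshot("my-snapshot", TableName.valueOf("cloned_table"),
            false /* restoreAcl */, "FILE" /* customSFT */);
      }
    }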

[hbase] 16/16: HBASE-26265 Update ref guide to mention the new store file tracker im… (#3942)

2021-12-22 Thread elserj
This is an automated email from the ASF dual-hosted git repository.

elserj pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hbase.git

commit f16b7b1bfa49a68029c8ec7c1882ff4bd5536593
Author: Wellington Ramos Chevreuil 
AuthorDate: Thu Dec 16 21:07:38 2021 +

HBASE-26265 Update ref guide to mention the new store file tracker im… (#3942)
---
 .../asciidoc/_chapters/store_file_tracking.adoc| 145 +
 src/main/asciidoc/book.adoc|   1 +
 2 files changed, 146 insertions(+)

diff --git a/src/main/asciidoc/_chapters/store_file_tracking.adoc b/src/main/asciidoc/_chapters/store_file_tracking.adoc
new file mode 100644
index 000..74d802f
--- /dev/null
+++ b/src/main/asciidoc/_chapters/store_file_tracking.adoc
@@ -0,0 +1,145 @@
+
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+[[storefiletracking]]
+= Store File Tracking
+:doctype: book
+:numbered:
+:toc: left
+:icons: font
+:experimental:
+
+== Overview
+
+This feature introduces an abstraction layer to track store files still used/needed by store
+engines, allowing for plugging different approaches of identifying store
+files required by the given store.
+
+Historically, HBase internals have relied on creating hfiles in temporary directories first, renaming
+those files to the actual store directory at operation commit time. That's a simple and convenient
+way to separate transient from already finalised files that are ready to serve client reads with data.
+This approach works well with strongly consistent file systems, but with the popularity of less
+consistent file systems, mainly object stores which can be used like file systems, the dependency on
+atomic rename operations starts to introduce performance penalties. The Amazon S3 object store, in
+particular, has been the most affected deployment, due to its lack of atomic renames. The HBase
+community temporarily bypassed this problem by building a distributed locking layer called HBOSS,
+to guarantee atomicity of operations against S3.
+
+With *Store File Tracking*, the decision on where to originally create new hfiles and how to proceed
+upon commit is delegated to the specific Store File Tracking implementation.
+The implementation can be set at the HBase service level in *hbase-site.xml* or at the
+Table or Column Family level via the TableDescriptor configuration.

+NOTE: When the store file tracking implementation is specified in *hbase-site.xml*, this
+configuration is also propagated into a table's configuration at table creation time. This is to
+avoid dangerous configuration mismatches between processes, which could potentially lead to data loss.
+
+== Available Implementations
+
+Store File Tracking initial version provides three builtin implementations:
+
+* DEFAULT
+* FILE
+* MIGRATION
+
+### DEFAULT
+
+As per the name, this is the Store File Tracking implementation used by default when no explicit
+configuration has been defined. The DEFAULT tracker implements the standard approach using temporary
+directories and renames. This is how all previous HBase versions (implicitly) tracked store files.
+
+### FILE
+
+A file tracker implementation that creates new files straight in the store directory, avoiding the
+need for rename operations. It keeps a list of committed hfiles in memory, backed by meta files, in
+each store directory. Whenever a new hfile is committed, the list of _tracked files_ in the given
+store is updated and a new meta file is written with this list's contents, discarding the previous
+meta file, which now contains an outdated list.
+
+### MIGRATION
+
+A special implementation to be used when swapping between Store File Tracking implementations on
+pre-existing tables that already contain data, and therefore have files being tracked under a
+specific logic.
+
+== Usage
+
+For fresh deployments that don't yet contain any user data, the *FILE* implementation can simply be
+set as the value of the *hbase.store.file-tracker.impl* property in the global *hbase-site.xml*,
+prior to the first hbase start. Omitting this property sets the *DEFAULT* implementation.

+For clusters with da
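
A hedged sketch of opting a single new table into the FILE tracker at creation time, assuming (as the chapter states for the table-level path) that the same hbase.store.file-tracker.impl key is honored in the table descriptor:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class FileTrackerTableSketch {
      // Sketch only: the property name comes from the chapter above; setting it
      // in the descriptor is the per-table path the chapter mentions.
      static void createFileTrackedTable(Admin admin) throws Exception {
        TableDescriptor desc = TableDescriptorBuilder.newBuilder(TableName.valueOf("sft_demo"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))
            .setValue("hbase.store.file-tracker.impl", "FILE")
            .build();
        admin.createTable(desc);
      }
    }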

[hbase] 14/16: HBASE-26454 CreateTableProcedure still relies on temp dir and renames… (#3845)

2021-12-22 Thread elserj
This is an automated email from the ASF dual-hosted git repository.

elserj pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hbase.git

commit d00b5faade4e40f2127fe2b18bf97dae69ce33f6
Author: Wellington Ramos Chevreuil 
AuthorDate: Fri Nov 19 12:16:29 2021 +

HBASE-26454 CreateTableProcedure still relies on temp dir and renames… (#3845)

Signed-off-by: Duo Zhang 
---
 .../master/procedure/CreateTableProcedure.java | 30 ++-
 .../master/procedure/DeleteTableProcedure.java | 95 +++---
 .../access/SnapshotScannerHDFSAclHelper.java   |  4 +-
 .../hadoop/hbase/master/TestMasterFileSystem.java  | 29 ++-
 .../master/procedure/TestDeleteTableProcedure.java | 66 ---
 5 files changed, 43 insertions(+), 181 deletions(-)

diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateTableProcedure.java
index 0a6a469..d77b95f 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateTableProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateTableProcedure.java
@@ -23,7 +23,6 @@ import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;
 import java.util.function.Supplier;
-import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.DoNotRetryIOException;
 import org.apache.hadoop.hbase.HBaseIOException;
@@ -336,41 +335,22 @@ public class CreateTableProcedure
   final TableDescriptor tableDescriptor, List newRegions,
   final CreateHdfsRegions hdfsRegionHandler) throws IOException {
 final MasterFileSystem mfs = env.getMasterServices().getMasterFileSystem();
-final Path tempdir = mfs.getTempDir();
 
 // 1. Create Table Descriptor
 // using a copy of descriptor, table will be created enabling first
-    final Path tempTableDir = CommonFSUtils.getTableDir(tempdir, tableDescriptor.getTableName());
+    final Path tableDir = CommonFSUtils.getTableDir(mfs.getRootDir(),
+      tableDescriptor.getTableName());
     ((FSTableDescriptors)(env.getMasterServices().getTableDescriptors()))
-        .createTableDescriptorForTableDirectory(tempTableDir, tableDescriptor, false);
+        .createTableDescriptorForTableDirectory(
+          tableDir, tableDescriptor, false);
 
 // 2. Create Regions
-newRegions = hdfsRegionHandler.createHdfsRegions(env, tempdir,
+newRegions = hdfsRegionHandler.createHdfsRegions(env, mfs.getRootDir(),
 tableDescriptor.getTableName(), newRegions);
 
-// 3. Move Table temp directory to the hbase root location
-moveTempDirectoryToHBaseRoot(env, tableDescriptor, tempTableDir);
-
 return newRegions;
   }
 
-  protected static void moveTempDirectoryToHBaseRoot(
-final MasterProcedureEnv env,
-final TableDescriptor tableDescriptor,
-final Path tempTableDir) throws IOException {
-final MasterFileSystem mfs = env.getMasterServices().getMasterFileSystem();
-final Path tableDir =
-      CommonFSUtils.getTableDir(mfs.getRootDir(), tableDescriptor.getTableName());
-FileSystem fs = mfs.getFileSystem();
-if (!fs.delete(tableDir, true) && fs.exists(tableDir)) {
-  throw new IOException("Couldn't delete " + tableDir);
-}
-if (!fs.rename(tempTableDir, tableDir)) {
-  throw new IOException("Unable to move table from temp=" + tempTableDir +
-" to hbase root=" + tableDir);
-}
-  }
-
   protected static List<RegionInfo> addTableToMeta(final MasterProcedureEnv env,
     final TableDescriptor tableDescriptor, final List<RegionInfo> regions) throws IOException {
     assert (regions != null && regions.size() > 0) : "expected at least 1 region, got " + regions;
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteTableProcedure.java
index 80dddc7..297efc2 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteTableProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteTableProcedure.java
@@ -20,10 +20,8 @@ package org.apache.hadoop.hbase.master.procedure;
 
 import java.io.IOException;
 import java.util.ArrayList;
-import java.util.Arrays;
 import java.util.List;
 import java.util.stream.Collectors;
-import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.MetaTableAccessor;
@@ -277,82 +275,49 @@ public class DeleteTableProcedure
   final boolean archive) throws IOException {
 final MasterFileSystem mfs = env.getMasterServices().getMasterFileSystem();
 final FileSystem fs = mfs.getFileSystem();
-final 

[hbase] 09/16: HBASE-26326 CreateTableProcedure fails when FileBasedStoreFileTracker… (#3721)

2021-12-22 Thread elserj
This is an automated email from the ASF dual-hosted git repository.

elserj pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hbase.git

commit 06db852aa009240f8baa4a79e7f688e82a17b8d0
Author: Wellington Ramos Chevreuil 
AuthorDate: Wed Oct 13 15:48:13 2021 +0100

HBASE-26326 CreateTableProcedure fails when FileBasedStoreFileTracker… (#3721)

Signed-off-by: Duo Zhang 
Signed-off-by: Josh Elser 
---
 .../hbase/master/procedure/CreateTableProcedure.java |  6 ++
 .../storefiletracker/FileBasedStoreFileTracker.java  |  9 -
 .../storefiletracker/MigrationStoreFileTracker.java  | 12 +---
 .../storefiletracker/StoreFileTracker.java   |  5 +++--
 .../storefiletracker/StoreFileTrackerBase.java   |  9 -
 .../storefiletracker/StoreFileTrackerFactory.java| 20 ++--
 .../master/procedure/TestCreateTableProcedure.java   | 16 
 .../storefiletracker/TestStoreFileTracker.java   |  2 +-
 8 files changed, 49 insertions(+), 30 deletions(-)

diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateTableProcedure.java
index ee8e51f..0a6a469 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateTableProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateTableProcedure.java
@@ -34,7 +34,6 @@ import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.client.RegionReplicaUtil;
 import org.apache.hadoop.hbase.client.TableDescriptor;
-import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
 import org.apache.hadoop.hbase.client.TableState;
 import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
 import org.apache.hadoop.hbase.master.MasterFileSystem;
@@ -290,9 +289,8 @@ public class CreateTableProcedure
         (newRegions != null ? newRegions.size() : 0));
     }

-    TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(tableDescriptor);
-    StoreFileTrackerFactory.persistTrackerConfig(env.getMasterConfiguration(), builder);
-    tableDescriptor = builder.build();
+    tableDescriptor = StoreFileTrackerFactory.updateWithTrackerConfigs(env.getMasterConfiguration(),
+      tableDescriptor);
 
 final MasterCoprocessorHost cpHost = env.getMasterCoprocessorHost();
 if (cpHost != null) {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/FileBasedStoreFileTracker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/FileBasedStoreFileTracker.java
index c370b87..4da7911 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/FileBasedStoreFileTracker.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/FileBasedStoreFileTracker.java
@@ -56,7 +56,14 @@ class FileBasedStoreFileTracker extends StoreFileTrackerBase {

   public FileBasedStoreFileTracker(Configuration conf, boolean isPrimaryReplica, StoreContext ctx) {
 super(conf, isPrimaryReplica, ctx);
-backedFile = new StoreFileListFile(ctx);
+    // CreateTableProcedure needs to instantiate the configured SFT impl, in order to update table
+    // descriptors with the SFT impl specific configs. By the time this happens, the table has no
+    // regions nor stores yet, so it can't create a proper StoreContext.
+if (ctx != null) {
+  backedFile = new StoreFileListFile(ctx);
+} else {
+  backedFile = null;
+}
   }
 
   @Override
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/MigrationStoreFileTracker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/MigrationStoreFileTracker.java
index 1946d4b..230c1ec 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/MigrationStoreFileTracker.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/MigrationStoreFileTracker.java
@@ -21,6 +21,7 @@ import java.io.IOException;
 import java.util.Collection;
 import java.util.List;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
 import org.apache.hadoop.hbase.procedure2.util.StringUtils;
 import org.apache.hadoop.hbase.regionserver.StoreContext;
@@ -88,17 +89,6 @@ class MigrationStoreFileTracker extends StoreFileTrackerBase {
   "Should not call this method on " + getClass().getSimpleName());
   }
 
-  @Override
-  public void persistConfiguration(TableDescriptorBuilder builder) {
-super.persistConfiguration(builder);
-if (StringUtils.isEmpty(builder.getValue(SRC_IMPL))) {
-  builde

[hbase] 13/16: HBASE-26271 Cleanup the broken store files under data directory (#3786)

2021-12-22 Thread elserj
This is an automated email from the ASF dual-hosted git repository.

elserj pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hbase.git

commit a288365f929f46295dff88608f3a1660b9e98a7c
Author: BukrosSzabolcs 
AuthorDate: Tue Nov 9 17:19:00 2021 +0100

HBASE-26271 Cleanup the broken store files under data directory (#3786)

Signed-off-by: Duo Zhang 
Signed-off-by: Josh Elser 
Signed-off-by: Wellington Ramos Chevreuil 
---
 .../hadoop/hbase/mob/DefaultMobStoreCompactor.java |   6 +-
 .../regionserver/AbstractMultiFileWriter.java  |   6 +-
 .../hbase/regionserver/BrokenStoreFileCleaner.java | 202 ++
 .../regionserver/DateTieredMultiFileWriter.java|   2 +-
 .../hbase/regionserver/HRegionFileSystem.java  |   2 +-
 .../hadoop/hbase/regionserver/HRegionServer.java   |  27 +++
 .../apache/hadoop/hbase/regionserver/HStore.java   |   6 +
 .../hadoop/hbase/regionserver/StoreEngine.java |  21 ++
 .../hbase/regionserver/StripeMultiFileWriter.java  |   2 +-
 .../compactions/AbstractMultiOutputCompactor.java  |   4 +-
 .../hbase/regionserver/compactions/Compactor.java  |  45 -
 .../compactions/DateTieredCompactor.java   |   6 +-
 .../regionserver/compactions/DefaultCompactor.java |   9 +-
 .../regionserver/compactions/StripeCompactor.java  |   2 +-
 .../FileBasedStoreFileTracker.java |   2 +-
 .../MigrationStoreFileTracker.java |   2 +-
 .../storefiletracker/StoreFileTracker.java |   6 +
 .../storefiletracker/StoreFileTrackerBase.java |   6 -
 .../hbase/snapshot/RestoreSnapshotHelper.java  |   2 +-
 .../hadoop/hbase/mob/FaultyMobStoreCompactor.java  |   3 +-
 .../regionserver/TestBrokenStoreFileCleaner.java   | 225 +
 .../hbase/regionserver/TestCompactorMemLeak.java   |   4 +-
 .../storefiletracker/TestStoreFileTracker.java |   1 -
 23 files changed, 556 insertions(+), 35 deletions(-)

diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/DefaultMobStoreCompactor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/DefaultMobStoreCompactor.java
index 01fe000..15f0a73 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/DefaultMobStoreCompactor.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/DefaultMobStoreCompactor.java
@@ -38,7 +38,6 @@ import org.apache.hadoop.hbase.DoNotRetryIOException;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.PrivateCellUtil;
 import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.regionserver.CellSink;
 import org.apache.hadoop.hbase.regionserver.HMobStore;
 import org.apache.hadoop.hbase.regionserver.HStore;
 import org.apache.hadoop.hbase.regionserver.HStoreFile;
@@ -286,7 +285,6 @@ public class DefaultMobStoreCompactor extends DefaultCompactor {
* 
* @param fd File details
* @param scanner Where to read from.
-   * @param writer Where to write to.
* @param smallestReadPoint Smallest read point.
    * @param cleanSeqId When true, remove seqId(used to be mvcc) value which is <= smallestReadPoint
* @param throughputController The compaction throughput controller.
@@ -295,7 +293,7 @@ public class DefaultMobStoreCompactor extends DefaultCompactor {
    * @return Whether compaction ended; false if it was interrupted for any reason.
    */
   @Override
-  protected boolean performCompaction(FileDetails fd, InternalScanner scanner, CellSink writer,
+  protected boolean performCompaction(FileDetails fd, InternalScanner scanner,
     long smallestReadPoint, boolean cleanSeqId, ThroughputController throughputController,
   boolean major, int numofFilesToCompact) throws IOException {
 long bytesWrittenProgressForLog = 0;
@@ -665,7 +663,7 @@ public class DefaultMobStoreCompactor extends DefaultCompactor {


   @Override
-  protected List<Path> commitWriter(StoreFileWriter writer, FileDetails fd,
+  protected List<Path> commitWriter(FileDetails fd,
     CompactionRequestImpl request) throws IOException {
     List<Path> newFiles = Lists.newArrayList(writer.getPath());
     writer.appendMetadata(fd.maxSeqId, request.isAllFiles(), request.getFiles());
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/AbstractMultiFileWriter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/AbstractMultiFileWriter.java
index f250304..82c3867 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/AbstractMultiFileWriter.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/AbstractMultiFileWriter.java
@@ -110,7 +110,11 @@ public abstract class AbstractMultiFileWriter implements CellSink, ShipperListen
     return paths;
   }

-  protected abstract Collection<StoreFileWriter> writers();
+  /**
+   * Returns all writers. This is used to prevent deleting currently written storefiles
+   * during cleanup.
+   */
+  public abstract Collection writ

[hbase] 11/16: HBASE-26328 Clone snapshot doesn't load reference files into FILE SFT impl (#3749)

2021-12-22 Thread elserj
This is an automated email from the ASF dual-hosted git repository.

elserj pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hbase.git

commit 08d117197cb9de241414b1a22a037d106f7687f2
Author: Wellington Ramos Chevreuil 
AuthorDate: Fri Oct 22 16:56:15 2021 +0100

HBASE-26328 Clone snapshot doesn't load reference files into FILE SFT impl (#3749)

Signed-off-by: Duo Zhang 
---
 .../java/org/apache/hadoop/hbase/io/HFileLink.java | 63 ++
 .../master/procedure/CloneSnapshotProcedure.java   | 52 +++-
 .../storefiletracker/StoreFileTrackerBase.java |  1 +
 .../hbase/snapshot/RestoreSnapshotHelper.java  | 95 ++
 .../TestCloneSnapshotProcedureFileBasedSFT.java| 42 ++
 5 files changed, 157 insertions(+), 96 deletions(-)

diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/HFileLink.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/HFileLink.java
index 74836ce..fbed724 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/HFileLink.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/HFileLink.java
@@ -201,7 +201,6 @@ public class HFileLink extends FileLink {
 return isHFileLink(path.getName());
   }
 
-
   /**
* @param fileName File name to check.
* @return True if the path is a HFileLink.
@@ -322,10 +321,10 @@ public class HFileLink extends FileLink {
* @param dstFamilyPath - Destination path (table/region/cf/)
* @param hfileRegionInfo - Linked HFile Region Info
* @param hfileName - Linked HFile name
-   * @return true if the file is created, otherwise the file exists.
-   * @throws IOException on file or parent directory creation failure
+   * @return the file link name.
+   * @throws IOException on file or parent directory creation failure.
*/
-  public static boolean create(final Configuration conf, final FileSystem fs,
+  public static String create(final Configuration conf, final FileSystem fs,
   final Path dstFamilyPath, final RegionInfo hfileRegionInfo,
   final String hfileName) throws IOException {
 return create(conf, fs, dstFamilyPath, hfileRegionInfo, hfileName, true);
@@ -343,10 +342,10 @@ public class HFileLink extends FileLink {
* @param hfileRegionInfo - Linked HFile Region Info
* @param hfileName - Linked HFile name
   * @param createBackRef - Whether back reference should be created. Defaults to true.
-   * @return true if the file is created, otherwise the file exists.
-   * @throws IOException on file or parent directory creation failure
+   * @return the file link name.
+   * @throws IOException on file or parent directory creation failure.
*/
-  public static boolean create(final Configuration conf, final FileSystem fs,
+  public static String create(final Configuration conf, final FileSystem fs,
   final Path dstFamilyPath, final RegionInfo hfileRegionInfo,
   final String hfileName, final boolean createBackRef) throws IOException {
 TableName linkedTable = hfileRegionInfo.getTable();
@@ -366,17 +365,18 @@ public class HFileLink extends FileLink {
* @param linkedTable - Linked Table Name
* @param linkedRegion - Linked Region Name
* @param hfileName - Linked HFile name
-   * @return true if the file is created, otherwise the file exists.
-   * @throws IOException on file or parent directory creation failure
+   * @return the file link name.
+   * @throws IOException on file or parent directory creation failure.
*/
-  public static boolean create(final Configuration conf, final FileSystem fs,
+  public static String create(final Configuration conf, final FileSystem fs,
      final Path dstFamilyPath, final TableName linkedTable, final String linkedRegion,
      final String hfileName) throws IOException {
    return create(conf, fs, dstFamilyPath, linkedTable, linkedRegion, hfileName, true);
   }
 
   /**
-   * Create a new HFileLink
+   * Create a new HFileLink. In the event of link creation failure, this method throws an
+   * IOException, so that the calling upper layer can decide on how to proceed with this.
*
* It also adds a back-reference to the hfile back-reference directory
* to simplify the reference-count and the cleaning process.
@@ -388,10 +388,10 @@ public class HFileLink extends FileLink {
* @param linkedRegion - Linked Region Name
* @param hfileName - Linked HFile name
* @param createBackRef - Whether back reference should be created. Defaults 
to true.
-   * @return true if the file is created, otherwise the file exists.
-   * @throws IOException on file or parent directory creation failure
+   * @return the file link name.
+   * @throws IOException on file or parent directory creation failure.
*/
-  public static boolean create(final Configuration conf, final FileSystem fs,
+  public static String create(final Configuration conf, final FileSystem fs,
   final Path dstFamilyPath, final TableName

[hbase] 12/16: HBASE-26263 [Rolling Upgrading] Persist the StoreFileTracker configurations to TableDescriptor for existing tables (#3700)

2021-12-22 Thread elserj
This is an automated email from the ASF dual-hosted git repository.

elserj pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hbase.git

commit 8bec26ea91c08fc1e5cdac658a7ced235a97efba
Author: GeorryHuang 
AuthorDate: Sat Nov 6 22:20:12 2021 +0800

HBASE-26263 [Rolling Upgrading] Persist the StoreFileTracker configurations to TableDescriptor for existing tables (#3700)

Signed-off-by: Duo Zhang 
Reviewed-by: Wellington Ramos Chevreuil 
---
 .../org/apache/hadoop/hbase/master/HMaster.java|   6 +
 .../hbase/master/migrate/RollingUpgradeChore.java  | 130 +
 .../MigrateStoreFileTrackerProcedure.java  |  48 
 .../migrate/TestMigrateStoreFileTracker.java   | 107 +
 4 files changed, 291 insertions(+)

diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index 1683b16..b80a2e2 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -131,6 +131,7 @@ import 
org.apache.hadoop.hbase.master.http.MasterRedirectServlet;
 import org.apache.hadoop.hbase.master.http.MasterStatusServlet;
 import org.apache.hadoop.hbase.master.janitor.CatalogJanitor;
 import org.apache.hadoop.hbase.master.locking.LockManager;
+import org.apache.hadoop.hbase.master.migrate.RollingUpgradeChore;
 import org.apache.hadoop.hbase.master.normalizer.RegionNormalizerFactory;
 import org.apache.hadoop.hbase.master.normalizer.RegionNormalizerManager;
 import org.apache.hadoop.hbase.master.procedure.CreateTableProcedure;
@@ -376,6 +377,7 @@ public class HMaster extends HBaseServerBase<MasterRpcServices> implements Maste
   private ReplicationBarrierCleaner replicationBarrierCleaner;
   private MobFileCleanerChore mobFileCleanerChore;
   private MobFileCompactionChore mobFileCompactionChore;
+  private RollingUpgradeChore rollingUpgradeChore;
   // used to synchronize the mobCompactionStates
   private final IdLock mobCompactionLock = new IdLock();
   // save the information of mob compactions in tables.
@@ -1222,6 +1224,9 @@ public class HMaster extends HBaseServerBase<MasterRpcServices> implements Maste
   LOG.debug("Balancer post startup initialization complete, took " + (
   (EnvironmentEdgeManager.currentTime() - start) / 1000) + " seconds");
 }
+
+this.rollingUpgradeChore = new RollingUpgradeChore(this);
+getChoreService().scheduleChore(rollingUpgradeChore);
   }
 
   private void createMissingCFsInMetaDuringUpgrade(
@@ -1713,6 +1718,7 @@ public class HMaster extends HBaseServerBase<MasterRpcServices> implements Maste
 shutdownChore(snapshotCleanerChore);
 shutdownChore(hbckChore);
 shutdownChore(regionsRecoveryChore);
+shutdownChore(rollingUpgradeChore);
   }
 
   /**
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/migrate/RollingUpgradeChore.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/migrate/RollingUpgradeChore.java
new file mode 100644
index 000..3896b41
--- /dev/null
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/migrate/RollingUpgradeChore.java
@@ -0,0 +1,130 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.master.migrate;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.TimeUnit;
+import java.util.stream.Collectors;
+import org.apache.commons.lang3.StringUtils;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.ScheduledChore;
+import org.apache.hadoop.hbase.Stoppable;
+import org.apache.hadoop.hbase.TableDescriptors;
+import org.apache.hadoop.hbase.client.TableDescriptor;
+import org.apache.hadoop.hbase.master.MasterServices;
+import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
+import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
+import 
org.apache.hadoop.hbase.regionserver.storefiletracker.M
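
The chore wiring above follows the standard ScheduledChore pattern. A minimal sketch,
assuming the usual HBase chore API (the real RollingUpgradeChore additionally batches
tables, submits MigrateStoreFileTrackerProcedure instances, and shuts itself down when
no table is left to migrate):

    class ExampleMigrationChore extends ScheduledChore {
      private static final Logger LOG = LoggerFactory.getLogger(ExampleMigrationChore.class);
      private final MasterServices master;

      ExampleMigrationChore(MasterServices master) {
        super("ExampleMigrationChore", master, 10_000); // name, stopper, 10s period
        this.master = master;
      }

      @Override
      protected void chore() {
        try {
          for (TableDescriptor td : master.getTableDescriptors().getAll().values()) {
            // Persist any missing store file tracker configuration for td here (elided).
          }
        } catch (IOException e) {
          LOG.warn("Migration chore failed; will retry on the next run", e);
        }
      }
    }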

[hbase] 07/16: HBASE-26264 Add more checks to prevent misconfiguration on store file tracker (#3681)

2021-12-22 Thread elserj
This is an automated email from the ASF dual-hosted git repository.

elserj pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hbase.git

commit 5ff0f98a5367326753bc0e06d8726b66d109238c
Author: Duo Zhang 
AuthorDate: Wed Sep 15 23:00:03 2021 +0800

HBASE-26264 Add more checks to prevent misconfiguration on store file 
tracker (#3681)

Signed-off-by: Josh Elser 
---
 .../assignment/MergeTableRegionsProcedure.java |   3 +-
 .../assignment/SplitTableRegionProcedure.java  |   3 +-
 .../master/procedure/CreateTableProcedure.java |   8 +-
 .../master/procedure/ModifyTableProcedure.java |   5 +
 .../hbase/regionserver/HRegionFileSystem.java  |   2 +-
 .../MigrationStoreFileTracker.java |   8 +
 .../storefiletracker/StoreFileTrackerFactory.java  | 173 +--
 .../TestChangeStoreFileTracker.java| 242 +
 8 files changed, 422 insertions(+), 22 deletions(-)

diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java
index e6bbe44..e9051da 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java
@@ -614,8 +614,7 @@ public class MergeTableRegionsProcedure
   String family = hcd.getNameAsString();
   Configuration trackerConfig =
 
StoreFileTrackerFactory.mergeConfigurations(env.getMasterConfiguration(), htd, 
hcd);
-  StoreFileTracker tracker = StoreFileTrackerFactory.create(trackerConfig, 
true,
-family, regionFs);
+  StoreFileTracker tracker = StoreFileTrackerFactory.create(trackerConfig, 
family, regionFs);
   final Collection storeFiles = tracker.load();
   if (storeFiles != null && storeFiles.size() > 0) {
 final Configuration storeConfiguration =
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java
index ff16dc5..aa0c938 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java
@@ -670,8 +670,7 @@ public class SplitTableRegionProcedure
   String family = cfd.getNameAsString();
   Configuration trackerConfig = StoreFileTrackerFactory.
 mergeConfigurations(env.getMasterConfiguration(), htd, 
htd.getColumnFamily(cfd.getName()));
-  StoreFileTracker tracker = StoreFileTrackerFactory.create(trackerConfig, 
true,
-family, regionFs);
+  StoreFileTracker tracker = StoreFileTrackerFactory.create(trackerConfig, 
family, regionFs);
   Collection sfis = tracker.load();
   if (sfis == null) {
 continue;
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateTableProcedure.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateTableProcedure.java
index dccea55..ee8e51f 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateTableProcedure.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateTableProcedure.java
@@ -277,15 +277,17 @@ public class CreateTableProcedure
   MasterProcedureUtil.checkGroupNotEmpty(rsGroupInfo, forWhom);
 }
 
+// check for store file tracker configurations
+StoreFileTrackerFactory.checkForCreateTable(env.getMasterConfiguration(), 
tableDescriptor);
+
 return true;
   }
 
   private void preCreate(final MasterProcedureEnv env)
   throws IOException, InterruptedException {
 if (!getTableName().isSystemTable()) {
-  ProcedureSyncWait.getMasterQuotaManager(env)
-.checkNamespaceTableAndRegionQuota(
-  getTableName(), (newRegions != null ? newRegions.size() : 0));
+  
ProcedureSyncWait.getMasterQuotaManager(env).checkNamespaceTableAndRegionQuota(getTableName(),
+(newRegions != null ? newRegions.size() : 0));
 }
 
 TableDescriptorBuilder builder = 
TableDescriptorBuilder.newBuilder(tableDescriptor);
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.java
index 247dd9c..1640644 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.java
@@ -38,6 +38,7 @@ import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.master.MasterCopr

[hbase] 05/16: HBASE-26246 Persist the StoreFileTracker configurations to TableDescriptor when creating table (#3666)

2021-12-22 Thread elserj
This is an automated email from the ASF dual-hosted git repository.

elserj pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hbase.git

commit 0ee1689332856440334c1a0040215d5c86b9705a
Author: Wellington Ramos Chevreuil 
AuthorDate: Sun Sep 12 14:14:03 2021 +0100

HBASE-26246 Persist the StoreFileTracker configurations to TableDescriptor 
when creating table (#3666)

Signed-off-by: Duo Zhang 
---
 .../master/procedure/CreateTableProcedure.java |  7 ++
 .../hbase/regionserver/HRegionFileSystem.java  |  2 +-
 .../MigrationStoreFileTracker.java |  8 +++
 .../storefiletracker/StoreFileTracker.java |  8 +++
 .../storefiletracker/StoreFileTrackerBase.java | 13 +++
 .../storefiletracker/StoreFileTrackerFactory.java  | 25 +++---
 .../org/apache/hadoop/hbase/client/TestAdmin.java  |  6 ++
 .../org/apache/hadoop/hbase/client/TestAdmin3.java |  6 ++
 .../hbase/client/TestAsyncTableAdminApi.java   |  6 ++
 .../hbase/client/TestAsyncTableAdminApi3.java  |  6 ++
 .../procedure/MasterProcedureTestingUtility.java   |  7 ++
 .../master/procedure/TestCreateTableProcedure.java | 17 +++
 .../storefiletracker/TestStoreFileTracker.java | 14 +---
 13 files changed, 113 insertions(+), 12 deletions(-)

diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateTableProcedure.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateTableProcedure.java
index 2313e70..dccea55 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateTableProcedure.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateTableProcedure.java
@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.hbase.master.procedure;
 
+
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;
@@ -33,10 +34,12 @@ import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.client.RegionReplicaUtil;
 import org.apache.hadoop.hbase.client.TableDescriptor;
+import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
 import org.apache.hadoop.hbase.client.TableState;
 import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
 import org.apache.hadoop.hbase.master.MasterFileSystem;
 import org.apache.hadoop.hbase.procedure2.ProcedureStateSerializer;
+import 
org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTrackerFactory;
 import org.apache.hadoop.hbase.replication.ReplicationException;
 import org.apache.hadoop.hbase.rsgroup.RSGroupInfo;
 import org.apache.hadoop.hbase.util.CommonFSUtils;
@@ -285,6 +288,10 @@ public class CreateTableProcedure
   getTableName(), (newRegions != null ? newRegions.size() : 0));
 }
 
+TableDescriptorBuilder builder = 
TableDescriptorBuilder.newBuilder(tableDescriptor);
+StoreFileTrackerFactory.persistTrackerConfig(env.getMasterConfiguration(), 
builder);
+tableDescriptor = builder.build();
+
 final MasterCoprocessorHost cpHost = env.getMasterCoprocessorHost();
 if (cpHost != null) {
   final RegionInfo[] regions = newRegions == null ? null :
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java
index cb30432..aa0ee27 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java
@@ -610,7 +610,7 @@ public class HRegionFileSystem {
   writeRegionInfoFileContent(conf, fs, regionInfoFile, regionInfoContent);
   HRegionFileSystem regionFs = HRegionFileSystem.openRegionFromFileSystem(
 env.getMasterConfiguration(), fs, getTableDir(), regionInfo, false);
-  insertRegionFilesIntoStoreTracker(allRegionFiles, env, regionFs);
+insertRegionFilesIntoStoreTracker(allRegionFiles, env, regionFs);
 }
 return regionDir;
   }
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/MigrationStoreFileTracker.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/MigrationStoreFileTracker.java
index e486e6d..483a240 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/MigrationStoreFileTracker.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/MigrationStoreFileTracker.java
@@ -21,6 +21,7 @@ import java.io.IOException;
 import java.util.Collection;
 import java.util.List;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
 import org.apache.hadoop.hbase.regionserver.StoreContext;
 import org.apache.hadoop.hbase.regionserver.StoreFileInfo

[hbase] 02/16: HBASE-25988 Store the store file list by a file (#3578)

2021-12-22 Thread elserj
This is an automated email from the ASF dual-hosted git repository.

elserj pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hbase.git

commit 43b40e9374f0148b5b3a89f6d65611435c5181f6
Author: Duo Zhang 
AuthorDate: Thu Aug 26 18:51:12 2021 +0800

HBASE-25988 Store the store file list by a file (#3578)

Signed-off-by: Wellington Chevreuil 
---
 .../protobuf/server/region/StoreFileTracker.proto  |  29 +++--
 .../hadoop/hbase/regionserver/StoreContext.java|   5 +
 .../hadoop/hbase/regionserver/StoreEngine.java |   8 +-
 .../storefiletracker/DefaultStoreFileTracker.java  |   5 +-
 .../FileBasedStoreFileTracker.java | 142 +
 .../storefiletracker/StoreFileListFile.java| 142 +
 .../storefiletracker/StoreFileTrackerBase.java |  11 +-
 .../storefiletracker/StoreFileTrackerFactory.java  |  12 +-
 .../TestRegionWithFileBasedStoreFileTracker.java   | 109 
 9 files changed, 430 insertions(+), 33 deletions(-)

diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/StoreFileTrackerFactory.java
 b/hbase-protocol-shaded/src/main/protobuf/server/region/StoreFileTracker.proto
similarity index 57%
copy from 
hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/StoreFileTrackerFactory.java
copy to 
hbase-protocol-shaded/src/main/protobuf/server/region/StoreFileTracker.proto
index 4f7231b..2a269ea 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/StoreFileTrackerFactory.java
+++ 
b/hbase-protocol-shaded/src/main/protobuf/server/region/StoreFileTracker.proto
@@ -15,21 +15,22 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.hbase.regionserver.storefiletracker;
+syntax = "proto2";
+// This file contains protocol buffers that are used for store file tracker.
+package hbase.pb;
 
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.regionserver.StoreContext;
-import org.apache.yetus.audience.InterfaceAudience;
+option java_package = "org.apache.hadoop.hbase.shaded.protobuf.generated";
+option java_outer_classname = "StoreFileTrackerProtos";
+option java_generic_services = true;
+option java_generate_equals_and_hash = true;
+option optimize_for = SPEED;
 
-/**
- * Factory method for creating store file tracker.
- */
-@InterfaceAudience.Private
-public final class StoreFileTrackerFactory {
+message StoreFileEntry {
+  required string name = 1;
+  required uint64 size = 2;
+}
 
-  public static StoreFileTracker create(Configuration conf, TableName 
tableName,
-boolean isPrimaryReplica, StoreContext ctx) {
-return new DefaultStoreFileTracker(conf, tableName, isPrimaryReplica, ctx);
-  }
+message StoreFileList {
+  required uint64 timestamp = 1;
+  repeated StoreFileEntry store_file = 2;
 }
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreContext.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreContext.java
index 2a9f968..588f8f4 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreContext.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreContext.java
@@ -22,6 +22,7 @@ import java.util.Collection;
 import java.util.function.Supplier;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.CellComparator;
+import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
 import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.io.HeapSize;
@@ -109,6 +110,10 @@ public final class StoreContext implements HeapSize {
 return coprocessorHost;
   }
 
+  public TableName getTableName() {
+return getRegionInfo().getTable();
+  }
+
   public RegionInfo getRegionInfo() {
 return regionFileSystem.getRegionInfo();
   }
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreEngine.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreEngine.java
index 4033c33..0486729 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreEngine.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreEngine.java
@@ -173,9 +173,9 @@ public abstract class StoreEngine
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver.storefiletracker
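
Given the proto definitions above, persisting a tracked-file list boils down to building
and serializing a StoreFileList message. A minimal sketch with the generated classes
(file name and size values are illustrative):

    StoreFileTrackerProtos.StoreFileList.Builder builder =
        StoreFileTrackerProtos.StoreFileList.newBuilder()
            .setTimestamp(System.currentTimeMillis());
    builder.addStoreFile(
        StoreFileTrackerProtos.StoreFileEntry.newBuilder()
            .setName("0f3bc1a4e2d14d6da5ffc1868dd1b4d1")   // hfile name
            .setSize(1024L)                                 // file length in bytes
            .build());
    byte[] serialized = builder.build().toByteArray();      // written to the meta file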

[hbase] 10/16: HBASE-26386 Refactor StoreFileTracker implementations to expose the set method (#3774)

2021-12-22 Thread elserj
This is an automated email from the ASF dual-hosted git repository.

elserj pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hbase.git

commit e4e7cf80b74585e5a1904e658c269a9efa459b74
Author: Duo Zhang 
AuthorDate: Thu Oct 21 10:27:45 2021 +0800

HBASE-26386 Refactor StoreFileTracker implementations to expose the set 
method (#3774)

Signed-off-by: Wellington Chevreuil 
---
 .../regionserver/storefiletracker/DefaultStoreFileTracker.java |  2 +-
 .../storefiletracker/FileBasedStoreFileTracker.java|  2 +-
 .../storefiletracker/MigrationStoreFileTracker.java|  5 +
 .../hbase/regionserver/storefiletracker/StoreFileTracker.java  |  7 +--
 .../regionserver/storefiletracker/StoreFileTrackerBase.java| 10 --
 5 files changed, 8 insertions(+), 18 deletions(-)

diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/DefaultStoreFileTracker.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/DefaultStoreFileTracker.java
index a13b75b..b1e298d 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/DefaultStoreFileTracker.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/DefaultStoreFileTracker.java
@@ -62,7 +62,7 @@ class DefaultStoreFileTracker extends StoreFileTrackerBase {
   }
 
   @Override
-  void set(List<StoreFileInfo> files) {
+  public void set(List<StoreFileInfo> files) {
 // NOOP
   }
 }
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/FileBasedStoreFileTracker.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/FileBasedStoreFileTracker.java
index 4da7911..079b59b 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/FileBasedStoreFileTracker.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/FileBasedStoreFileTracker.java
@@ -148,7 +148,7 @@ class FileBasedStoreFileTracker extends 
StoreFileTrackerBase {
   }
 
   @Override
-  void set(List<StoreFileInfo> files) throws IOException {
+  public void set(List<StoreFileInfo> files) throws IOException {
 synchronized (storefiles) {
   storefiles.clear();
   StoreFileList.Builder builder = StoreFileList.newBuilder();
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/MigrationStoreFileTracker.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/MigrationStoreFileTracker.java
index 230c1ec..a6648f2 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/MigrationStoreFileTracker.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/MigrationStoreFileTracker.java
@@ -21,9 +21,6 @@ import java.io.IOException;
 import java.util.Collection;
 import java.util.List;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.client.TableDescriptor;
-import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
-import org.apache.hadoop.hbase.procedure2.util.StringUtils;
 import org.apache.hadoop.hbase.regionserver.StoreContext;
 import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
 import org.apache.yetus.audience.InterfaceAudience;
@@ -84,7 +81,7 @@ class MigrationStoreFileTracker extends StoreFileTrackerBase {
   }
 
   @Override
-  void set(List<StoreFileInfo> files) {
+  public void set(List<StoreFileInfo> files) {
 throw new UnsupportedOperationException(
   "Should not call this method on " + getClass().getSimpleName());
   }
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/StoreFileTracker.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/StoreFileTracker.java
index fd8f7c9..f56a0dd 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/StoreFileTracker.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/StoreFileTracker.java
@@ -20,8 +20,6 @@ package org.apache.hadoop.hbase.regionserver.storefiletracker;
 import java.io.IOException;
 import java.util.Collection;
 import java.util.List;
-
-import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
 import org.apache.hadoop.hbase.regionserver.CreateStoreFileWriterParams;
 import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
@@ -70,6 +68,11 @@ public interface StoreFileTracker {
 throws IOException;
 
   /**
+   * Set the store files.
+   */
+  void set(List<StoreFileInfo> files) throws IOException;
+
+  /**
* Create a writer for writing new store files.
* @return Writer for a new StoreFile
*/
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/StoreFileTrackerBase.java
 
b/hbase-server/src/main/java/org/apa
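
With set() promoted to the public interface, a tracker's file list can be overwritten
wholesale. A minimal sketch of the migration-style usage this enables (srcTracker and
dstTracker are assumed to be two configured StoreFileTracker instances; note that
MigrationStoreFileTracker itself deliberately rejects set(), as shown above):

    List<StoreFileInfo> files = srcTracker.load();  // read the currently tracked files
    dstTracker.set(files);                          // replace dst's tracked list in one step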

[hbase] 08/16: HBASE-26280 Use store file tracker when snapshoting (#3685)

2021-12-22 Thread elserj
This is an automated email from the ASF dual-hosted git repository.

elserj pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hbase.git

commit fc4f6d10e3736c976c0be91e604cbc82d42acdbf
Author: Duo Zhang 
AuthorDate: Fri Sep 17 09:40:44 2021 +0800

HBASE-26280 Use store file tracker when snapshoting (#3685)

Signed-off-by: Wellington Chevreuil 
Reviewed-by: Josh Elser 
---
 .../assignment/MergeTableRegionsProcedure.java |  5 +-
 .../assignment/SplitTableRegionProcedure.java  |  5 +-
 .../hbase/regionserver/HRegionFileSystem.java  | 10 +--
 .../storefiletracker/StoreFileTrackerFactory.java  | 17 ++---
 .../hadoop/hbase/snapshot/SnapshotManifest.java| 42 ++--
 ...oneSnapshotFromClientCloneLinksAfterDelete.java |  4 +-
 .../hbase/client/TestMobSnapshotFromClient.java|  7 +-
 .../hbase/client/TestSnapshotFromClient.java   | 30 +++--
 .../hadoop/hbase/regionserver/TestHStoreFile.java  |  6 +-
 .../hbase/snapshot/MobSnapshotTestingUtils.java| 74 +++---
 .../hbase/snapshot/SnapshotTestingUtils.java   | 16 ++---
 11 files changed, 107 insertions(+), 109 deletions(-)

diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java
index e9051da..0f41db5 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java
@@ -612,9 +612,8 @@ public class MergeTableRegionsProcedure
 List<Path> mergedFiles = new ArrayList<>();
 for (ColumnFamilyDescriptor hcd : htd.getColumnFamilies()) {
   String family = hcd.getNameAsString();
-  Configuration trackerConfig =
-
StoreFileTrackerFactory.mergeConfigurations(env.getMasterConfiguration(), htd, 
hcd);
-  StoreFileTracker tracker = StoreFileTrackerFactory.create(trackerConfig, 
family, regionFs);
+  StoreFileTracker tracker =
+StoreFileTrackerFactory.create(env.getMasterConfiguration(), htd, hcd, 
regionFs);
   final Collection<StoreFileInfo> storeFiles = tracker.load();
   if (storeFiles != null && storeFiles.size() > 0) {
 final Configuration storeConfiguration =
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java
index aa0c938..effdba4 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java
@@ -668,9 +668,8 @@ public class SplitTableRegionProcedure
 new HashMap<String, Collection<StoreFileInfo>>(htd.getColumnFamilyCount());
 for (ColumnFamilyDescriptor cfd : htd.getColumnFamilies()) {
   String family = cfd.getNameAsString();
-  Configuration trackerConfig = StoreFileTrackerFactory.
-mergeConfigurations(env.getMasterConfiguration(), htd, 
htd.getColumnFamily(cfd.getName()));
-  StoreFileTracker tracker = StoreFileTrackerFactory.create(trackerConfig, 
family, regionFs);
+  StoreFileTracker tracker =
+StoreFileTrackerFactory.create(env.getMasterConfiguration(), htd, cfd, 
regionFs);
   Collection<StoreFileInfo> sfis = tracker.load();
   if (sfis == null) {
 continue;
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java
index e78d8ad..8110025 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java
@@ -598,7 +598,6 @@ public class HRegionFileSystem {
* to the proper location in the filesystem.
*
* @param regionInfo daughter {@link 
org.apache.hadoop.hbase.client.RegionInfo}
-   * @throws IOException
*/
   public Path commitDaughterRegion(final RegionInfo regionInfo, List<Path> 
allRegionFiles,
   MasterProcedureEnv env) throws IOException {
@@ -625,12 +624,8 @@ public class HRegionFileSystem {
 Map<String, List<StoreFileInfo>> fileInfoMap = new HashMap<>();
 for(Path file : allFiles) {
   String familyName = file.getParent().getName();
-  trackerMap.computeIfAbsent(familyName, t -> {
-Configuration config = 
StoreFileTrackerFactory.mergeConfigurations(conf, tblDesc,
-  tblDesc.getColumnFamily(Bytes.toBytes(familyName)));
-return StoreFileTrackerFactory.
-  create(config, familyName, regionFs);
-  });
+  trackerMap.computeIfAbsent(familyName, t -> 
StoreFileTrackerFactory.create(conf, tblDesc,
+tblDesc.getColumnFamily(B
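
The simplification above means call sites create the tracker straight from configuration
and descriptors, then read the file list through it instead of via
HRegionFileSystem.getStoreFiles(). A minimal sketch using the names from the diff:

    StoreFileTracker tracker =
        StoreFileTrackerFactory.create(env.getMasterConfiguration(), htd, hcd, regionFs);
    Collection<StoreFileInfo> storeFiles = tracker.load();
    if (storeFiles != null && !storeFiles.isEmpty()) {
      // snapshot, merge, or split logic consumes the tracked files here
    }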

[hbase] 06/16: HBASE-26248 Should find a suitable way to let users specify the store file tracker implementation (#3665)

2021-12-22 Thread elserj
This is an automated email from the ASF dual-hosted git repository.

elserj pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hbase.git

commit 2052e80e5da50ec0598cc7c873de0357a4f894a5
Author: Duo Zhang 
AuthorDate: Tue Sep 14 16:28:21 2021 +0800

HBASE-26248 Should find a suitable way to let users specify the store file 
tracker implementation (#3665)

Signed-off-by: Wellington Chevreuil 
---
 .../MigrationStoreFileTracker.java | 13 ++-
 .../storefiletracker/StoreFileTracker.java |  7 +-
 .../storefiletracker/StoreFileTrackerBase.java | 12 +--
 .../storefiletracker/StoreFileTrackerFactory.java  | 97 +++---
 .../org/apache/hadoop/hbase/client/TestAdmin.java  |  6 +-
 .../org/apache/hadoop/hbase/client/TestAdmin3.java |  6 +-
 .../hbase/client/TestAsyncTableAdminApi.java   |  6 +-
 .../hbase/client/TestAsyncTableAdminApi3.java  |  6 +-
 .../procedure/MasterProcedureTestingUtility.java   |  6 +-
 .../master/procedure/TestCreateTableProcedure.java |  6 +-
 .../regionserver/TestMergesSplitsAddToTracker.java |  4 +-
 .../TestMigrationStoreFileTracker.java | 27 +++---
 .../TestRegionWithFileBasedStoreFileTracker.java   |  3 +-
 .../TestStoreFileTrackerFactory.java   | 58 +
 14 files changed, 202 insertions(+), 55 deletions(-)

diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/MigrationStoreFileTracker.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/MigrationStoreFileTracker.java
index 483a240..3eeef90 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/MigrationStoreFileTracker.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/MigrationStoreFileTracker.java
@@ -22,6 +22,7 @@ import java.util.Collection;
 import java.util.List;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
+import org.apache.hadoop.hbase.procedure2.util.StringUtils;
 import org.apache.hadoop.hbase.regionserver.StoreContext;
 import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
 import org.apache.yetus.audience.InterfaceAudience;
@@ -44,8 +45,8 @@ class MigrationStoreFileTracker extends StoreFileTrackerBase {
 
   public MigrationStoreFileTracker(Configuration conf, boolean 
isPrimaryReplica, StoreContext ctx) {
 super(conf, isPrimaryReplica, ctx);
-this.src = StoreFileTrackerFactory.create(conf, SRC_IMPL, 
isPrimaryReplica, ctx);
-this.dst = StoreFileTrackerFactory.create(conf, DST_IMPL, 
isPrimaryReplica, ctx);
+this.src = StoreFileTrackerFactory.createForMigration(conf, SRC_IMPL, 
isPrimaryReplica, ctx);
+this.dst = StoreFileTrackerFactory.createForMigration(conf, DST_IMPL, 
isPrimaryReplica, ctx);
 Preconditions.checkArgument(!src.getClass().equals(dst.getClass()),
   "src and dst is the same: %s", src.getClass());
   }
@@ -90,7 +91,11 @@ class MigrationStoreFileTracker extends StoreFileTrackerBase 
{
   @Override
   public void persistConfiguration(TableDescriptorBuilder builder) {
 super.persistConfiguration(builder);
-builder.setValue(SRC_IMPL, src.getClass().getName());
-builder.setValue(DST_IMPL, dst.getClass().getName());
+if (StringUtils.isEmpty(builder.getValue(SRC_IMPL))) {
+  builder.setValue(SRC_IMPL, src.getTrackerName());
+}
+if (StringUtils.isEmpty(builder.getValue(DST_IMPL))) {
+  builder.setValue(DST_IMPL, dst.getTrackerName());
+}
   }
 }
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/StoreFileTracker.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/StoreFileTracker.java
index 81fa1a9..59fe7ef 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/StoreFileTracker.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/StoreFileTracker.java
@@ -75,7 +75,12 @@ public interface StoreFileTracker {
   StoreFileWriter createWriter(CreateStoreFileWriterParams params) throws 
IOException;
 
   /**
-   * Saves StoreFileTracker implementations specific configs into the table 
descriptors.
+   * Saves StoreFileTracker implementation-specific configurations into the table
+   * descriptors.
+   * <p/>
+   * This is used to avoid accidental data loss when changing the cluster level store file
+   * tracker implementation, and also possible misconfiguration between master and region
+   * servers.
+   * <p/>
+   * See HBASE-26246 for more details.
* @param builder The table descriptor builder for the given table.
*/
   void persistConfiguration(TableDescriptorBuilder builder);
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/StoreFileTrackerBase.java
 
b/hbase-server/src/main/java/org/apache/ha
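
For illustration, the descriptor-level keys this change manages could be set as below when
migrating a table between tracker implementations (a sketch; the exact property names are
taken from the tracker-impl and SRC_IMPL/DST_IMPL constants referenced in the diff and
should be treated as assumptions):

    TableDescriptorBuilder builder = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("exampleTable"))
        .setValue("hbase.store.file-tracker.impl", "MIGRATION")
        .setValue("hbase.store.file-tracker.migration.src.impl", "DEFAULT")
        .setValue("hbase.store.file-tracker.migration.dst.impl", "FILE");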

[hbase] 01/16: HBASE-26064 Introduce a StoreFileTracker to abstract the store file tracking logic

2021-12-22 Thread elserj
This is an automated email from the ASF dual-hosted git repository.

elserj pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hbase.git

commit 6aaef89789792cd7dd1483d10276273b20a35fa3
Author: Duo Zhang 
AuthorDate: Thu Jul 29 18:35:19 2021 +0800

HBASE-26064 Introduce a StoreFileTracker to abstract the store file 
tracking logic

Signed-off-by: Wellington Chevreuil 
---
 .../hadoop/hbase/mob/DefaultMobStoreCompactor.java |  22 +-
 .../hadoop/hbase/mob/DefaultMobStoreFlusher.java   |   4 +-
 .../regionserver/CreateStoreFileWriterParams.java  | 134 
 .../hbase/regionserver/DateTieredStoreEngine.java  |   5 +-
 .../hbase/regionserver/DefaultStoreEngine.java |   5 +-
 .../hbase/regionserver/DefaultStoreFlusher.java|  11 +-
 .../hadoop/hbase/regionserver/HMobStore.java   |   3 +-
 .../hbase/regionserver/HRegionFileSystem.java  |  10 +-
 .../apache/hadoop/hbase/regionserver/HStore.java   | 729 +
 .../hadoop/hbase/regionserver/StoreContext.java|   9 +
 .../hadoop/hbase/regionserver/StoreEngine.java | 461 -
 .../hbase/regionserver/StoreFileManager.java   |   9 +
 .../hadoop/hbase/regionserver/StoreFlusher.java|   9 +-
 .../hadoop/hbase/regionserver/StoreUtils.java  |  37 +-
 .../hbase/regionserver/StripeStoreEngine.java  |   9 +-
 .../hbase/regionserver/StripeStoreFlusher.java |   9 +-
 .../compactions/AbstractMultiOutputCompactor.java  |   7 +-
 .../hbase/regionserver/compactions/Compactor.java  |  36 +-
 .../regionserver/compactions/DefaultCompactor.java |  16 +-
 .../storefiletracker/DefaultStoreFileTracker.java  |  61 ++
 .../storefiletracker/StoreFileTracker.java |  75 +++
 .../storefiletracker/StoreFileTrackerBase.java | 178 +
 .../storefiletracker/StoreFileTrackerFactory.java  |  35 +
 .../util/compaction/MajorCompactionRequest.java|   1 -
 .../org/apache/hadoop/hbase/TestIOFencing.java |  12 +-
 .../regionserver/TestCacheOnWriteInSchema.java |   6 +-
 .../hbase/regionserver/TestDefaultStoreEngine.java |   5 +-
 .../hadoop/hbase/regionserver/TestHRegion.java |   4 +-
 .../hadoop/hbase/regionserver/TestHStore.java  |  33 +-
 .../TestRegionMergeTransactionOnCluster.java   |   6 +-
 .../regionserver/TestStoreFileRefresherChore.java  |   3 +-
 .../regionserver/TestStoreScannerClosure.java  |   6 +-
 .../hbase/regionserver/TestStripeStoreEngine.java  |   2 +
 .../compactions/TestDateTieredCompactor.java   |  12 +-
 .../compactions/TestStripeCompactionPolicy.java|  12 +-
 .../compactions/TestStripeCompactor.java   |  12 +-
 36 files changed, 1261 insertions(+), 727 deletions(-)

diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/DefaultMobStoreCompactor.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/DefaultMobStoreCompactor.java
index c45fdff..01fe000 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/DefaultMobStoreCompactor.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/DefaultMobStoreCompactor.java
@@ -29,7 +29,6 @@ import java.util.HashMap;
 import java.util.List;
 import java.util.Map.Entry;
 import java.util.Optional;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
@@ -144,17 +143,16 @@ public class DefaultMobStoreCompactor extends 
DefaultCompactor {
   };
 
   private final CellSinkFactory<StoreFileWriter> writerFactory =
-  new CellSinkFactory<StoreFileWriter>() {
-@Override
-public StoreFileWriter createWriter(InternalScanner scanner,
-
org.apache.hadoop.hbase.regionserver.compactions.Compactor.FileDetails fd,
-boolean shouldDropBehind, boolean major) throws IOException {
-  // make this writer with tags always because of possible new cells 
with tags.
-  return store.createWriterInTmp(fd.maxKeyCount,
-major ? majorCompactionCompression : minorCompactionCompression,
-true, true, true, shouldDropBehind);
-}
-  };
+new CellSinkFactory<StoreFileWriter>() {
+  @Override
+  public StoreFileWriter createWriter(InternalScanner scanner,
+org.apache.hadoop.hbase.regionserver.compactions.Compactor.FileDetails 
fd,
+boolean shouldDropBehind, boolean major) throws IOException {
+// make this writer with tags always because of possible new cells 
with tags.
+return store.getStoreEngine().createWriter(
+  createParams(fd, shouldDropBehind, 
major).includeMVCCReadpoint(true).includesTag(true));
+  }
+};
 
   public DefaultMobStoreCompactor(Configuration conf, HStore store) {
 super(conf, store);
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/DefaultMobStoreFlusher.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/DefaultMobStoreFlusher.java
index 480b85c..4a1dc7b 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob

[hbase] 04/16: HBASE-26224 Introduce a MigrationStoreFileTracker to support migrating from different store file tracker implementations (#3656)

2021-12-22 Thread elserj
This is an automated email from the ASF dual-hosted git repository.

elserj pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hbase.git

commit 090b2fecf40491d46e0a650853266c9262bcafd1
Author: Duo Zhang 
AuthorDate: Thu Sep 9 12:11:25 2021 +0800

HBASE-26224 Introduce a MigrationStoreFileTracker to support migrating from 
different store file tracker implementations (#3656)

Signed-off-by: Wellington Chevreuil 
---
 .../storefiletracker/DefaultStoreFileTracker.java  |  10 +-
 .../FileBasedStoreFileTracker.java |  15 +-
 .../MigrationStoreFileTracker.java |  88 ++
 .../storefiletracker/StoreFileListFile.java|   6 +-
 .../storefiletracker/StoreFileTrackerBase.java |  12 +-
 .../storefiletracker/StoreFileTrackerFactory.java  |  40 +++--
 .../TestMigrationStoreFileTracker.java | 193 +
 7 files changed, 343 insertions(+), 21 deletions(-)

diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/DefaultStoreFileTracker.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/DefaultStoreFileTracker.java
index 22e0513..a13b75b 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/DefaultStoreFileTracker.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/DefaultStoreFileTracker.java
@@ -19,6 +19,7 @@ package org.apache.hadoop.hbase.regionserver.storefiletracker;
 
 import java.io.IOException;
 import java.util.Collection;
+import java.util.Collections;
 import java.util.List;
 import org.apache.hadoop.conf.Configuration;
 
@@ -39,7 +40,9 @@ class DefaultStoreFileTracker extends StoreFileTrackerBase {
 
   @Override
   public List<StoreFileInfo> load() throws IOException {
-return 
ctx.getRegionFileSystem().getStoreFiles(ctx.getFamily().getNameAsString());
+List<StoreFileInfo> files =
+  
ctx.getRegionFileSystem().getStoreFiles(ctx.getFamily().getNameAsString());
+return files != null ? files : Collections.emptyList();
   }
 
   @Override
@@ -57,4 +60,9 @@ class DefaultStoreFileTracker extends StoreFileTrackerBase {
 Collection<StoreFileInfo> newFiles) throws IOException {
 // NOOP
   }
+
+  @Override
+  void set(List<StoreFileInfo> files) {
+// NOOP
+  }
 }
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/FileBasedStoreFileTracker.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/FileBasedStoreFileTracker.java
index de28b0e..c370b87 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/FileBasedStoreFileTracker.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/FileBasedStoreFileTracker.java
@@ -48,7 +48,7 @@ import 
org.apache.hadoop.hbase.shaded.protobuf.generated.StoreFileTrackerProtos.
  * storages.
  */
 @InterfaceAudience.Private
-public class FileBasedStoreFileTracker extends StoreFileTrackerBase {
+class FileBasedStoreFileTracker extends StoreFileTrackerBase {
 
   private final StoreFileListFile backedFile;
 
@@ -139,4 +139,17 @@ public class FileBasedStoreFileTracker extends 
StoreFileTrackerBase {
   }
 }
   }
+
+  @Override
+  void set(List<StoreFileInfo> files) throws IOException {
+synchronized (storefiles) {
+  storefiles.clear();
+  StoreFileList.Builder builder = StoreFileList.newBuilder();
+  for (StoreFileInfo info : files) {
+storefiles.put(info.getPath().getName(), info);
+builder.addStoreFile(toStoreFileEntry(info));
+  }
+  backedFile.update(builder);
+}
+  }
 }
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/MigrationStoreFileTracker.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/MigrationStoreFileTracker.java
new file mode 100644
index 000..e486e6d
--- /dev/null
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/MigrationStoreFileTracker.java
@@ -0,0 +1,88 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the Li

[hbase] 03/16: HBASE-26079 Use StoreFileTracker when splitting and merging (#3617)

2021-12-22 Thread elserj
This is an automated email from the ASF dual-hosted git repository.

elserj pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hbase.git

commit 6e053765e8141f6ad807325828c520b232d2600f
Author: Wellington Ramos Chevreuil 
AuthorDate: Wed Sep 8 10:31:49 2021 +0100

HBASE-26079 Use StoreFileTracker when splitting and merging (#3617)

Signed-off-by: Duo Zhang 
---
 .../assignment/MergeTableRegionsProcedure.java |  22 +-
 .../assignment/SplitTableRegionProcedure.java  |  42 ++--
 .../hbase/regionserver/HRegionFileSystem.java  |  42 +++-
 .../storefiletracker/DefaultStoreFileTracker.java  |   4 +-
 .../storefiletracker/StoreFileTracker.java |   1 -
 .../storefiletracker/StoreFileTrackerFactory.java  |  33 ++-
 .../hbase/regionserver/TestDefaultStoreEngine.java |   1 +
 .../regionserver/TestDirectStoreSplitsMerges.java  |  32 ++-
 .../hadoop/hbase/regionserver/TestHStoreFile.java  |  19 +-
 .../regionserver/TestMergesSplitsAddToTracker.java | 262 +
 .../hbase/regionserver/TestStripeStoreEngine.java  |   1 +
 .../storefiletracker/TestStoreFileTracker.java}|  42 ++--
 12 files changed, 434 insertions(+), 67 deletions(-)

diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java
index da3d73e..e6bbe44 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java
@@ -24,7 +24,6 @@ import java.util.Collection;
 import java.util.Collections;
 import java.util.List;
 import java.util.stream.Stream;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -56,6 +55,8 @@ import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
 import org.apache.hadoop.hbase.regionserver.HStoreFile;
 import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
 import org.apache.hadoop.hbase.regionserver.StoreUtils;
+import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTracker;
+import 
org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTrackerFactory;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.CommonFSUtils;
 import org.apache.hadoop.hbase.wal.WALSplitUtil;
@@ -587,30 +588,35 @@ public class MergeTableRegionsProcedure
 final MasterFileSystem mfs = env.getMasterServices().getMasterFileSystem();
 final Path tableDir = CommonFSUtils.getTableDir(mfs.getRootDir(), 
regionsToMerge[0].getTable());
 final FileSystem fs = mfs.getFileSystem();
-
+List<Path> mergedFiles = new ArrayList<>();
 HRegionFileSystem mergeRegionFs = 
HRegionFileSystem.createRegionOnFileSystem(
   env.getMasterConfiguration(), fs, tableDir, mergedRegion);
 
 for (RegionInfo ri: this.regionsToMerge) {
   HRegionFileSystem regionFs = HRegionFileSystem.openRegionFromFileSystem(
   env.getMasterConfiguration(), fs, tableDir, ri, false);
-  mergeStoreFiles(env, regionFs, mergeRegionFs, mergedRegion);
+  mergedFiles.addAll(mergeStoreFiles(env, regionFs, mergeRegionFs, 
mergedRegion));
 }
 assert mergeRegionFs != null;
-mergeRegionFs.commitMergedRegion();
+mergeRegionFs.commitMergedRegion(mergedFiles, env);
 
 // Prepare to create merged regions
 env.getAssignmentManager().getRegionStates().
 getOrCreateRegionStateNode(mergedRegion).setState(State.MERGING_NEW);
   }
 
-  private void mergeStoreFiles(MasterProcedureEnv env, HRegionFileSystem 
regionFs,
+  private List<Path> mergeStoreFiles(MasterProcedureEnv env, HRegionFileSystem 
regionFs,
 HRegionFileSystem mergeRegionFs, RegionInfo mergedRegion) throws 
IOException {
 final TableDescriptor htd = env.getMasterServices().getTableDescriptors()
   .get(mergedRegion.getTable());
+List<Path> mergedFiles = new ArrayList<>();
 for (ColumnFamilyDescriptor hcd : htd.getColumnFamilies()) {
   String family = hcd.getNameAsString();
-  final Collection<StoreFileInfo> storeFiles = 
regionFs.getStoreFiles(family);
+  Configuration trackerConfig =
+
StoreFileTrackerFactory.mergeConfigurations(env.getMasterConfiguration(), htd, 
hcd);
+  StoreFileTracker tracker = StoreFileTrackerFactory.create(trackerConfig, 
true,
+family, regionFs);
+  final Collection<StoreFileInfo> storeFiles = tracker.load();
   if (storeFiles != null && storeFiles.size() > 0) {
 final Configuration storeConfiguration =
   StoreUtils.createStoreConfiguration(env.getMasterConfiguration(), 
htd, hcd);
@@ -622,11 +628,13 @@ public class MergeTableRegionsProcedure
   // is running in a regionserver's Store context, or we might not be 
able
   // to read the hfiles.
 

[hbase] branch master updated (6818ec2 -> f16b7b1)

2021-12-22 Thread elserj
This is an automated email from the ASF dual-hosted git repository.

elserj pushed a change to branch master
in repository https://gitbox.apache.org/repos/asf/hbase.git.


from 6818ec2  HBASE-26613 The logic of the method incrementIV in Encryption 
class has problem (#3968)
 new 6aaef897 HBASE-26064 Introduce a StoreFileTracker to abstract the 
store file tracking logic
 new 43b40e9  HBASE-25988 Store the store file list by a file (#3578)
 new 6e05376  HBASE-26079 Use StoreFileTracker when splitting and merging 
(#3617)
 new 090b2fe  HBASE-26224 Introduce a MigrationStoreFileTracker to support 
migrating from different store file tracker implementations (#3656)
 new 0ee1689  HBASE-26246 Persist the StoreFileTracker configurations to 
TableDescriptor when creating table (#3666)
 new 2052e80  HBASE-26248 Should find a suitable way to let users specify 
the store file tracker implementation (#3665)
 new 5ff0f98  HBASE-26264 Add more checks to prevent misconfiguration on 
store file tracker (#3681)
 new fc4f6d1  HBASE-26280 Use store file tracker when snapshoting (#3685)
 new 06db852  HBASE-26326 CreateTableProcedure fails when 
FileBasedStoreFileTracker… (#3721)
 new e4e7cf8  HBASE-26386 Refactor StoreFileTracker implementations to 
expose the set method (#3774)
 new 08d1171  HBASE-26328 Clone snapshot doesn't load reference files into 
FILE SFT impl (#3749)
 new 8bec26e  HBASE-26263 [Rolling Upgrading] Persist the StoreFileTracker 
configurations to TableDescriptor for existing tables (#3700)
 new a288365  HBASE-26271 Cleanup the broken store files under data 
directory (#3786)
 new d00b5fa  HBASE-26454 CreateTableProcedure still relies on temp dir and 
renames… (#3845)
 new 771e552  HBASE-26286: Add support for specifying store file tracker 
when restoring or cloning snapshot
 new f16b7b1  HBASE-26265 Update ref guide to mention the new store file 
tracker im… (#3942)

The 16 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.


Summary of changes:
 .../java/org/apache/hadoop/hbase/client/Admin.java |  44 +-
 .../hadoop/hbase/client/AdminOverAsyncAdmin.java   |   7 +-
 .../org/apache/hadoop/hbase/client/AsyncAdmin.java |  14 +-
 .../hadoop/hbase/client/AsyncHBaseAdmin.java   |   6 +-
 .../hbase/client/ColumnFamilyDescriptor.java   |   5 +
 .../client/ColumnFamilyDescriptorBuilder.java  |   6 +
 .../hadoop/hbase/client/RawAsyncHBaseAdmin.java|  24 +-
 .../src/main/protobuf/server/master/Master.proto   |   1 +
 .../protobuf/server/master/MasterProcedure.proto   |   1 +
 .../StoreFileTracker.proto}|  16 +-
 .../java/org/apache/hadoop/hbase/io/HFileLink.java |  63 +-
 .../org/apache/hadoop/hbase/master/HMaster.java|  31 +-
 .../hadoop/hbase/master/MasterRpcServices.java |   2 +-
 .../assignment/MergeTableRegionsProcedure.java |  20 +-
 .../assignment/SplitTableRegionProcedure.java  |  40 +-
 .../hbase/master/migrate/RollingUpgradeChore.java  | 130 
 .../master/procedure/CloneSnapshotProcedure.java   | 106 +--
 .../master/procedure/CreateTableProcedure.java |  43 +-
 .../master/procedure/DeleteTableProcedure.java |  95 +--
 .../master/procedure/ModifyTableProcedure.java |   5 +
 .../master/procedure/RestoreSnapshotProcedure.java |   7 +-
 .../hbase/master/snapshot/SnapshotManager.java |  27 +-
 .../hadoop/hbase/mob/DefaultMobStoreCompactor.java |  28 +-
 .../hadoop/hbase/mob/DefaultMobStoreFlusher.java   |   4 +-
 .../regionserver/AbstractMultiFileWriter.java  |   6 +-
 .../hbase/regionserver/BrokenStoreFileCleaner.java | 202 ++
 .../regionserver/CreateStoreFileWriterParams.java  | 134 
 .../regionserver/DateTieredMultiFileWriter.java|   2 +-
 .../hbase/regionserver/DateTieredStoreEngine.java  |   5 +-
 .../hbase/regionserver/DefaultStoreEngine.java |   5 +-
 .../hbase/regionserver/DefaultStoreFlusher.java|  11 +-
 .../hadoop/hbase/regionserver/HMobStore.java   |   3 +-
 .../hbase/regionserver/HRegionFileSystem.java  |  50 +-
 .../hadoop/hbase/regionserver/HRegionServer.java   |  27 +
 .../apache/hadoop/hbase/regionserver/HStore.java   | 735 +
 .../hadoop/hbase/regionserver/StoreContext.java|  14 +
 .../hadoop/hbase/regionserver/StoreEngine.java | 482 +-
 .../hbase/regionserver/StoreFileManager.java   |   9 +
 .../hadoop/hbase/regionserver/StoreFlusher.java|   9 +-
 .../hadoop/hbase/regionserver/StoreUtils.java  |  37 +-
 .../hbase/regionserver/StripeMultiFileWriter.java  |   2 +-
 .../hbase/regionserver/StripeStoreEngine.java  |   9 +-
 .../hbase/regionserver/StripeStoreFlusher.java |   9 +-
 .../compactions/AbstractMultiOutputCompactor.java  |  11 +-
 .../hbase/regionserver/compactions/C

[hbase] branch HBASE-26067 updated: HBASE-26265 Update ref guide to mention the new store file tracker im… (#3942)

2021-12-16 Thread elserj
This is an automated email from the ASF dual-hosted git repository.

elserj pushed a commit to branch HBASE-26067
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/HBASE-26067 by this push:
 new 1dbdefc  HBASE-26265 Update ref guide to mention the new store file 
tracker im… (#3942)
1dbdefc is described below

commit 1dbdefc7c165e13de1272d9f03a8ef78980d36e2
Author: Wellington Ramos Chevreuil 
AuthorDate: Thu Dec 16 21:07:38 2021 +

HBASE-26265 Update ref guide to mention the new store file tracker im… 
(#3942)
---
 .../asciidoc/_chapters/store_file_tracking.adoc| 145 +
 src/main/asciidoc/book.adoc|   1 +
 2 files changed, 146 insertions(+)

diff --git a/src/main/asciidoc/_chapters/store_file_tracking.adoc 
b/src/main/asciidoc/_chapters/store_file_tracking.adoc
new file mode 100644
index 000..74d802f
--- /dev/null
+++ b/src/main/asciidoc/_chapters/store_file_tracking.adoc
@@ -0,0 +1,145 @@
+
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+[[storefiletracking]]
+= Store File Tracking
+:doctype: book
+:numbered:
+:toc: left
+:icons: font
+:experimental:
+
+== Overview
+
+This feature introduces an abstraction layer to track store files still used/needed by store
+engines, allowing different approaches to identifying the store files required by a given
+store to be plugged in.
+
+Historically, HBase internals have relied on creating hfiles in temporary directories first,
+then renaming those files to the actual store directory at operation commit time. That is a
+simple and convenient way to separate transient files from finalised files that are ready to
+serve client reads. This approach works well on strongly consistent file systems, but with
+the growing popularity of less consistent object stores that can be used like file systems,
+the dependency on atomic rename operations starts to introduce performance penalties. Amazon
+S3, in particular, has been the most affected deployment, due to its lack of atomic renames.
+The HBase community temporarily bypassed this problem by building HBOSS, a distributed
+locking layer that guarantees atomicity of operations against S3.
+
+With *Store File Tracking*, the decision on where to originally create new hfiles and how to
+proceed upon commit is delegated to the specific Store File Tracking implementation.
+The implementation can be set at the HBase service level in *hbase-site.xml* or per Table or
+Column Family via the TableDescriptor configuration.
+
+NOTE: When the store file tracking implementation is specified in *hbase-site.xml*, this
+configuration is also propagated into a table's configuration at table creation time. This is
+to avoid dangerous configuration mismatches between processes, which could potentially lead
+to data loss.
+
+== Available Implementations
+
+The initial version of Store File Tracking provides three built-in implementations:
+
+* DEFAULT
+* FILE
+* MIGRATION
+
+### DEFAULT
+
+As per the name, this is the Store File Tracking implementation used by default when no
+explicit configuration has been defined. The DEFAULT tracker implements the standard
+approach of temporary directories and renames, which is how all previous HBase versions
+implicitly tracked store files.
+
+=== FILE
+
+A file tracker implementation that creates new files straight in the store
+directory, avoiding the need for rename operations. It keeps a list of
+committed hfiles in memory, backed by meta files, in each store directory.
+Whenever a new hfile is committed, the list of _tracked files_ in the given
+store is updated and a new meta file is written with the contents of this
+list, discarding the previous meta file, which now holds an outdated list.
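+
+A toy model of that commit step follows. The file names and helper layout are
+illustrative only, not the real HBase internals; the sketch just shows the
+rewrite-the-list idea, with no rename of the hfile itself.
+
+[source,java]
+----
+import java.io.IOException;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.util.ArrayList;
+import java.util.List;
+
+public class FileTrackerSketch {
+  // Commit a new hfile: the hfile is already in its final location, so only
+  // the tracked-file list is rewritten.
+  public static void commit(Path storeDir, String newHFile, long seq) throws IOException {
+    Path current = storeDir.resolve("meta." + seq);
+    List<String> tracked = Files.exists(current)
+        ? new ArrayList<>(Files.readAllLines(current))
+        : new ArrayList<>();
+    tracked.add(newHFile);
+    Files.write(storeDir.resolve("meta." + (seq + 1)), tracked); // new list wins
+    Files.deleteIfExists(current); // discard the now-outdated list
+  }
+}
+----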
+
+=== MIGRATION
+
+A special implementation to be used when swapping between Store File Tracking
+implementations on pre-existing tables that already contain data, and
+therefore have files tracked under a specific logic.
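+
+A sketch of pointing an existing table at a different tracker through
+MIGRATION. The `hbase.store.file-tracker.migration.src.impl` and
+`hbase.store.file-tracker.migration.dst.impl` keys are assumptions to verify
+against your release.
+
+[source,java]
+----
+import java.io.IOException;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.TableDescriptor;
+import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
+
+public class SwitchToFileTracker {
+  // Move a pre-existing table from DEFAULT to FILE tracking via MIGRATION.
+  static void migrate(Admin admin, TableName tn) throws IOException {
+    TableDescriptor td = TableDescriptorBuilder.newBuilder(admin.getDescriptor(tn))
+        .setValue("hbase.store.file-tracker.impl", "MIGRATION")
+        .setValue("hbase.store.file-tracker.migration.src.impl", "DEFAULT")
+        .setValue("hbase.store.file-tracker.migration.dst.impl", "FILE")
+        .build();
+    admin.modifyTable(td); // once migrated, set the impl directly to FILE
+  }
+}
+----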
+
+== Usage
+
+For fresh deployments that don't yet contain any user data, *FILE* 
implementation can b

[hbase] branch HBASE-26067 updated: HBASE-26286: Add support for specifying store file tracker when restoring or cloning snapshot

2021-12-15 Thread elserj
This is an automated email from the ASF dual-hosted git repository.

elserj pushed a commit to branch HBASE-26067
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/HBASE-26067 by this push:
 new 81c9b87  HBASE-26286: Add support for specifying store file tracker 
when restoring or cloning snapshot
81c9b87 is described below

commit 81c9b8793e6b87657106685f6bdbe512d2e7df5c
Author: BukrosSzabolcs 
AuthorDate: Wed Dec 15 20:09:03 2021 -0500

HBASE-26286: Add support for specifying store file tracker when restoring 
or cloning snapshot

Closes #3851

Signed-off-by: Duo Zhang 
Signed-off-by: Josh Elser 
---
 .../java/org/apache/hadoop/hbase/client/Admin.java | 44 --
 .../hadoop/hbase/client/AdminOverAsyncAdmin.java   |  7 ++-
 .../org/apache/hadoop/hbase/client/AsyncAdmin.java | 14 -
 .../hadoop/hbase/client/AsyncHBaseAdmin.java   |  6 +-
 .../hbase/client/ColumnFamilyDescriptor.java   |  5 ++
 .../client/ColumnFamilyDescriptorBuilder.java  |  6 ++
 .../hadoop/hbase/client/RawAsyncHBaseAdmin.java| 24 +---
 .../src/main/protobuf/server/master/Master.proto   |  1 +
 .../protobuf/server/master/MasterProcedure.proto   |  1 +
 .../org/apache/hadoop/hbase/master/HMaster.java| 25 
 .../hadoop/hbase/master/MasterRpcServices.java |  2 +-
 .../master/procedure/CloneSnapshotProcedure.java   | 54 +++-
 .../master/procedure/RestoreSnapshotProcedure.java |  7 ++-
 .../hbase/master/snapshot/SnapshotManager.java | 27 +---
 .../storefiletracker/StoreFileTrackerFactory.java  | 39 +++-
 .../hbase/snapshot/RestoreSnapshotHelper.java  | 11 ++--
 .../TestCloneSnapshotFromClientCustomSFT.java  | 71 ++
 .../storefiletracker/TestStoreFileTracker.java |  2 -
 .../TestStoreFileTrackerFactory.java   | 52 
 .../hbase/rsgroup/VerifyingRSGroupAdmin.java   |  5 +-
 hbase-shell/src/main/ruby/hbase/admin.rb   |  4 +-
 hbase-shell/src/main/ruby/hbase_constants.rb   |  1 +
 .../src/main/ruby/shell/commands/clone_snapshot.rb |  6 +-
 .../hadoop/hbase/thrift2/client/ThriftAdmin.java   |  4 +-
 24 files changed, 357 insertions(+), 61 deletions(-)

diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
index 48893cc..6c36660 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
@@ -69,6 +69,7 @@ import org.apache.hadoop.hbase.util.Pair;
 import org.apache.yetus.audience.InterfaceAudience;
 
 import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList;
+import org.apache.yetus.audience.InterfaceStability;
 
 /**
  * The administrative API for HBase. Obtain an instance from {@link 
Connection#getAdmin()} and
@@ -1620,7 +1621,7 @@ public interface Admin extends Abortable, Closeable {
* @throws IllegalArgumentException if the restore request is formatted 
incorrectly
*/
   void restoreSnapshot(String snapshotName, boolean takeFailSafeSnapshot, 
boolean restoreAcl)
-  throws IOException, RestoreSnapshotException;
+throws IOException, RestoreSnapshotException;
 
   /**
* Create a new table by cloning the snapshot content.
@@ -1633,7 +1634,25 @@ public interface Admin extends Abortable, Closeable {
*/
   default void cloneSnapshot(String snapshotName, TableName tableName)
   throws IOException, TableExistsException, RestoreSnapshotException {
-cloneSnapshot(snapshotName, tableName, false);
+cloneSnapshot(snapshotName, tableName, false, null);
+  }
+
+  /**
+   * Create a new table by cloning the snapshot content.
+   * @param snapshotName name of the snapshot to be cloned
+   * @param tableName name of the table where the snapshot will be restored
+   * @param restoreAcl true to clone acl into newly created table
+   * @param customSFT specify the StoreFileTracker used for the table
+   * @throws IOException if a remote or network exception occurs
+   * @throws TableExistsException if table to be created already exists
+   * @throws RestoreSnapshotException if snapshot failed to be cloned
+   * @throws IllegalArgumentException if the specified table has not a valid 
name
+   */
+  default void cloneSnapshot(String snapshotName, TableName tableName, boolean 
restoreAcl,
+String customSFT)
+throws IOException, TableExistsException, RestoreSnapshotException {
+get(cloneSnapshotAsync(snapshotName, tableName, restoreAcl, customSFT), 
getSyncWaitTimeout(),
+  TimeUnit.MILLISECONDS);
   }
 
   /**
@@ -1680,8 +1699,25 @@ public interface Admin extends Abortable, Closeable {
* @throws RestoreSnapshotException if snapshot failed to be cloned
* @throws IllegalArgumentException if the specified table has not a valid 
name
*/
-  Future
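
A usage sketch of the new four-argument overload above, assuming an
already-configured Connection; "FILE" names one of the built-in store file
trackers:

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class CloneWithCustomSFT {
      static void clone(Configuration conf) throws IOException {
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Clone the snapshot into a new table that uses the FILE tracker.
          admin.cloneSnapshot("mySnapshot", TableName.valueOf("clonedTable"), false, "FILE");
        }
      }
    }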

[hbase] branch branch-2 updated: HBASE-26512 Make timestamp format configurable in HBase shell scan output

2021-12-01 Thread elserj
This is an automated email from the ASF dual-hosted git repository.

elserj pushed a commit to branch branch-2
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2 by this push:
 new 061ccff  HBASE-26512 Make timestamp format configurable in HBase shell 
scan output
061ccff is described below

commit 061ccffb9e796200fc63a1e0a22265707f70c931
Author: Istvan Toth 
AuthorDate: Mon Nov 29 11:34:24 2021 +0100

HBASE-26512 Make timestamp format configurable in HBase shell scan output

Signed-off-by: Josh Elser 
---
 .../src/main/java/org/apache/hadoop/hbase/HConstants.java |  5 +
 hbase-shell/src/main/ruby/hbase/table.rb  | 11 +--
 2 files changed, 14 insertions(+), 2 deletions(-)

diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java 
b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
index e8f8e76..db9ccee 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
@@ -1685,6 +1685,11 @@ public final class HConstants {
 "hbase.regionserver.slowlog.systable.enabled";
   public static final boolean DEFAULT_SLOW_LOG_SYS_TABLE_ENABLED_KEY = false;
 
+  public static final String SHELL_TIMESTAMP_FORMAT_EPOCH_KEY =
+  "hbase.shell.timestamp.format.epoch";
+
+  public static final boolean DEFAULT_SHELL_TIMESTAMP_FORMAT_EPOCH = false;
+
   /**
* Number of rows in a batch operation above which a warning will be logged.
*/
diff --git a/hbase-shell/src/main/ruby/hbase/table.rb 
b/hbase-shell/src/main/ruby/hbase/table.rb
index 7351676..3fc6c85 100644
--- a/hbase-shell/src/main/ruby/hbase/table.rb
+++ b/hbase-shell/src/main/ruby/hbase/table.rb
@@ -119,6 +119,9 @@ EOF
   @name = @table.getName.getNameAsString
   @shell = shell
   @converters = {}
+  @timestamp_format_epoch = table.getConfiguration.getBoolean(
+  HConstants::SHELL_TIMESTAMP_FORMAT_EPOCH_KEY,
+  HConstants::DEFAULT_SHELL_TIMESTAMP_FORMAT_EPOCH)
 end
 
 def close
@@ -751,8 +754,12 @@ EOF
 end
 
 def toLocalDateTime(millis)
-  instant = java.time.Instant.ofEpochMilli(millis)
-  return java.time.LocalDateTime.ofInstant(instant, 
java.time.ZoneId.systemDefault()).toString
+  if @timestamp_format_epoch
+return millis
+  else
+instant = java.time.Instant.ofEpochMilli(millis)
+return java.time.LocalDateTime.ofInstant(instant, 
java.time.ZoneId.systemDefault()).toString
+  end
 end
 
 # Make a String of the passed kv
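
For example, setting the new key in hbase-site.xml makes the shell keep raw
epoch milliseconds in scan output instead of converting them to LocalDateTime
strings:

    <property>
      <name>hbase.shell.timestamp.format.epoch</name>
      <value>true</value>
    </property>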


[hbase] branch master updated: HBASE-26512 Make timestamp format configurable in HBase shell scan output

2021-12-01 Thread elserj
This is an automated email from the ASF dual-hosted git repository.

elserj pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/master by this push:
 new 7845d00  HBASE-26512 Make timestamp format configurable in HBase shell 
scan output
7845d00 is described below

commit 7845d006e6a4bf92e769bc3eec67b763f01fc491
Author: Istvan Toth 
AuthorDate: Mon Nov 29 11:34:24 2021 +0100

HBASE-26512 Make timestamp format configurable in HBase shell scan output

Signed-off-by: Josh Elser 
---
 .../src/main/java/org/apache/hadoop/hbase/HConstants.java |  5 +
 hbase-shell/src/main/ruby/hbase/table.rb  | 11 +--
 2 files changed, 14 insertions(+), 2 deletions(-)

diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java 
b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
index 6464158..4aacff1 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
@@ -1602,6 +1602,11 @@ public final class HConstants {
 "hbase.regionserver.slowlog.systable.enabled";
   public static final boolean DEFAULT_SLOW_LOG_SYS_TABLE_ENABLED_KEY = false;
 
+  public static final String SHELL_TIMESTAMP_FORMAT_EPOCH_KEY =
+  "hbase.shell.timestamp.format.epoch";
+
+  public static final boolean DEFAULT_SHELL_TIMESTAMP_FORMAT_EPOCH = false;
+
   /**
* Number of rows in a batch operation above which a warning will be logged.
*/
diff --git a/hbase-shell/src/main/ruby/hbase/table.rb 
b/hbase-shell/src/main/ruby/hbase/table.rb
index d55e516..0cd917e 100644
--- a/hbase-shell/src/main/ruby/hbase/table.rb
+++ b/hbase-shell/src/main/ruby/hbase/table.rb
@@ -119,6 +119,9 @@ EOF
   @name = @table.getName.getNameAsString
   @shell = shell
   @converters = {}
+  @timestamp_format_epoch = table.getConfiguration.getBoolean(
+  HConstants::SHELL_TIMESTAMP_FORMAT_EPOCH_KEY,
+  HConstants::DEFAULT_SHELL_TIMESTAMP_FORMAT_EPOCH)
 end
 
 def close
@@ -751,8 +754,12 @@ EOF
 end
 
 def toLocalDateTime(millis)
-  instant = java.time.Instant.ofEpochMilli(millis)
-  return java.time.LocalDateTime.ofInstant(instant, 
java.time.ZoneId.systemDefault()).toString
+  if @timestamp_format_epoch
+return millis
+  else
+instant = java.time.Instant.ofEpochMilli(millis)
+return java.time.LocalDateTime.ofInstant(instant, 
java.time.ZoneId.systemDefault()).toString
+  end
 end
 
 # Make a String of the passed kv


[hbase] branch branch-2.4 updated: HBASE-26512 Make timestamp format configurable in HBase shell scan output

2021-12-01 Thread elserj
This is an automated email from the ASF dual-hosted git repository.

elserj pushed a commit to branch branch-2.4
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2.4 by this push:
 new 06ef9e9  HBASE-26512 Make timestamp format configurable in HBase shell 
scan output
06ef9e9 is described below

commit 06ef9e9778ae2e1747b44bf994b241b890f07895
Author: Istvan Toth 
AuthorDate: Mon Nov 29 11:34:24 2021 +0100

HBASE-26512 Make timestamp format configurable in HBase shell scan output

Signed-off-by: Josh Elser 
---
 .../src/main/java/org/apache/hadoop/hbase/HConstants.java |  5 +
 hbase-shell/src/main/ruby/hbase/table.rb  | 11 +--
 2 files changed, 14 insertions(+), 2 deletions(-)

diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java 
b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
index 13dc401..dab33d3 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
@@ -1668,6 +1668,11 @@ public final class HConstants {
 "hbase.regionserver.slowlog.systable.enabled";
   public static final boolean DEFAULT_SLOW_LOG_SYS_TABLE_ENABLED_KEY = false;
 
+  public static final String SHELL_TIMESTAMP_FORMAT_EPOCH_KEY =
+  "hbase.shell.timestamp.format.epoch";
+
+  public static final boolean DEFAULT_SHELL_TIMESTAMP_FORMAT_EPOCH = false;
+
   /**
* Number of rows in a batch operation above which a warning will be logged.
*/
diff --git a/hbase-shell/src/main/ruby/hbase/table.rb 
b/hbase-shell/src/main/ruby/hbase/table.rb
index 7351676..3fc6c85 100644
--- a/hbase-shell/src/main/ruby/hbase/table.rb
+++ b/hbase-shell/src/main/ruby/hbase/table.rb
@@ -119,6 +119,9 @@ EOF
   @name = @table.getName.getNameAsString
   @shell = shell
   @converters = {}
+  @timestamp_format_epoch = table.getConfiguration.getBoolean(
+  HConstants::SHELL_TIMESTAMP_FORMAT_EPOCH_KEY,
+  HConstants::DEFAULT_SHELL_TIMESTAMP_FORMAT_EPOCH)
 end
 
 def close
@@ -751,8 +754,12 @@ EOF
 end
 
 def toLocalDateTime(millis)
-  instant = java.time.Instant.ofEpochMilli(millis)
-  return java.time.LocalDateTime.ofInstant(instant, 
java.time.ZoneId.systemDefault()).toString
+  if @timestamp_format_epoch
+return millis
+  else
+instant = java.time.Instant.ofEpochMilli(millis)
+return java.time.LocalDateTime.ofInstant(instant, 
java.time.ZoneId.systemDefault()).toString
+  end
 end
 
 # Make a String of the passed kv


[hbase-filesystem] 02/02: HBASE-26437 Clean up the znodes for the src after a rename.

2021-11-17 Thread elserj
This is an automated email from the ASF dual-hosted git repository.

elserj pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hbase-filesystem.git

commit 21e1e920b7f15dbb1bfb8b261d39d303ea97f3e2
Author: Josh Elser 
AuthorDate: Wed Nov 10 13:13:32 2021 -0500

HBASE-26437 Clean up the znodes for the src after a rename.

HBOSS was orphaning znodes from the src of a path which is renamed. Over
time, this will result in a very large usage of ZK due to HBOSS.

Add some logging to dump the contents of ZK

Signed-off-by: Wellington Chevreuil 

Closes #29
---
 .../hbase/oss/HBaseObjectStoreSemantics.java   |  20 ++-
 .../hadoop/hbase/oss/sync/TreeLockManager.java |  26 ++-
 .../hadoop/hbase/oss/sync/ZKTreeLockManager.java   |  18 +-
 .../hbase/oss/HBaseObjectStoreSemanticsTest.java   |   4 +
 .../apache/hadoop/hbase/oss/TestZNodeCleanup.java  | 188 +
 .../hadoop/hbase/oss/sync/TestTreeLockManager.java |   2 +-
 hbase-oss/src/test/resources/log4j.properties  |   2 +
 7 files changed, 246 insertions(+), 14 deletions(-)

diff --git 
a/hbase-oss/src/main/java/org/apache/hadoop/hbase/oss/HBaseObjectStoreSemantics.java
 
b/hbase-oss/src/main/java/org/apache/hadoop/hbase/oss/HBaseObjectStoreSemantics.java
index 7669181..65eeb16 100644
--- 
a/hbase-oss/src/main/java/org/apache/hadoop/hbase/oss/HBaseObjectStoreSemantics.java
+++ 
b/hbase-oss/src/main/java/org/apache/hadoop/hbase/oss/HBaseObjectStoreSemantics.java
@@ -24,6 +24,8 @@ import java.net.URI;
 import java.util.EnumSet;
 import java.util.List;
 import java.util.Map;
+import java.util.concurrent.CompletableFuture;
+
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.BlockLocation;
 import org.apache.hadoop.fs.ContentSummary;
@@ -452,13 +454,21 @@ public class HBaseObjectStoreSemantics extends 
FilterFileSystem {
 long startTime = System.currentTimeMillis();
 long lockAcquiredTime = startTime;
 long doneTime = startTime;
-try (AutoLock l = sync.lockRename(src, dst)) {
+// Future to pass into the AutoLock so it knows if it should clean up.
+final CompletableFuture renameResult = new CompletableFuture<>();
+try (AutoLock l = sync.lockRename(src, dst, renameResult)) {
   lockAcquiredTime = System.currentTimeMillis();
   metrics.updateAcquireRenameLockHisto(lockAcquiredTime- startTime);
-  boolean result = fs.rename(src, dst);
-  doneTime = System.currentTimeMillis();
-  metrics.updateRenameFsOperationHisto(doneTime - lockAcquiredTime);
-  return result;
+  // Defaulting to false in the case that fs.rename throws an exception
+  boolean result = false;
+  try {
+result = fs.rename(src, dst);
+return result;
+  } finally {
+renameResult.complete(result);
+doneTime = System.currentTimeMillis();
+metrics.updateRenameFsOperationHisto(doneTime - lockAcquiredTime);
+  }
 }
 finally {
   long releasedLocksTime = System.currentTimeMillis();
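
A minimal, self-contained sketch of the pattern above: the rename outcome is
published through a CompletableFuture that the lock's close() consults before
deciding whether to clean up. Class and method names here are illustrative,
not the HBOSS API.

    import java.util.concurrent.CompletableFuture;

    public class RenameOutcomeDemo {
      // Stand-in for the real fs.rename(src, dst); always "succeeds" here.
      static boolean doRename() { return true; }

      // Lock handle whose close() checks the operation outcome.
      static AutoCloseable lockWithCleanup(CompletableFuture<Boolean> outcome) {
        return () -> {
          // getNow() is safe: the caller completes the future before close() runs.
          if (Boolean.TRUE.equals(outcome.getNow(false))) {
            System.out.println("rename succeeded: clean up src state");
          } else {
            System.out.println("rename failed or unknown: leave state intact");
          }
        };
      }

      public static void main(String[] args) throws Exception {
        CompletableFuture<Boolean> outcome = new CompletableFuture<>();
        try (AutoCloseable lock = lockWithCleanup(outcome)) {
          boolean result = false;
          try {
            result = doRename();
          } finally {
            // The inner finally runs before the try-with-resources close(),
            // so close() always observes the real outcome.
            outcome.complete(result);
          }
        }
      }
    }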
diff --git 
a/hbase-oss/src/main/java/org/apache/hadoop/hbase/oss/sync/TreeLockManager.java 
b/hbase-oss/src/main/java/org/apache/hadoop/hbase/oss/sync/TreeLockManager.java
index 22209e4..9506262 100644
--- 
a/hbase-oss/src/main/java/org/apache/hadoop/hbase/oss/sync/TreeLockManager.java
+++ 
b/hbase-oss/src/main/java/org/apache/hadoop/hbase/oss/sync/TreeLockManager.java
@@ -21,7 +21,10 @@ package org.apache.hadoop.hbase.oss.sync;
 import java.io.IOException;
 import java.net.URI;
 import java.util.Arrays;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.Future;
 import java.util.concurrent.TimeUnit;
+
 import org.apache.hadoop.io.retry.RetryPolicies;
 import org.apache.hadoop.io.retry.RetryPolicy;
 import org.apache.hadoop.io.retry.RetryPolicy.RetryAction;
@@ -471,7 +474,7 @@ public abstract class TreeLockManager {
* @return AutoCloseable to release both paths
* @throws IOException at any possible IO failure.
*/
-  public AutoLock lockRename(Path rawSrc, Path rawDst) throws IOException {
+  public AutoLock lockRename(Path rawSrc, Path rawDst, Future 
successFuture) throws IOException {
 Path src = norm(rawSrc);
 Path dst = norm(rawDst);
 LOG.debug("About to lock for rename: from {} to {}", src, dst);
@@ -484,8 +487,29 @@ public abstract class TreeLockManager {
 }
 return new AutoLock() {
   public void close() throws IOException {
+// We have to clean up the src znodes:
+//   1. If the rename was successful
+//   2. While we still hold the write lock
 LOG.debug("About to unlock after rename: from {} to {}", src, dst);
 try {
+  Boolean renameSuccess;
+  try {
+renameSuccess = successFuture.get();
+  } catch (InterruptedException | ExecutionException e) {
+LOG.warn("Unable to determine if filesystem rename was 

[hbase-filesystem] branch master updated (04a304e -> 21e1e92)

2021-11-17 Thread elserj
This is an automated email from the ASF dual-hosted git repository.

elserj pushed a change to branch master
in repository https://gitbox.apache.org/repos/asf/hbase-filesystem.git.


from 04a304e  HBASE-26315 Upgrade commons-io to 2.11.0 in hbase-filesystem 
(#27)
 new 0aaacf6  HBASE-26453 Correct the behavior of isBeneath to not consider 
paths which share a name prefix as beneath one another.
 new 21e1e92  HBASE-26437 Clean up the znodes for the src after a rename.

The 2 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.


Summary of changes:
 .../hbase/oss/HBaseObjectStoreSemantics.java   |  20 ++-
 .../hadoop/hbase/oss/sync/TreeLockManager.java |  26 ++-
 .../hadoop/hbase/oss/sync/ZKTreeLockManager.java   |  45 +++--
 .../hbase/oss/HBaseObjectStoreSemanticsTest.java   |   4 +
 .../apache/hadoop/hbase/oss/TestZNodeCleanup.java  | 188 +
 .../hadoop/hbase/oss/sync/TestTreeLockManager.java |   2 +-
 .../hbase/oss/sync/TestZKTreeLockManager.java} |  34 ++--
 hbase-oss/src/test/resources/log4j.properties  |   2 +
 8 files changed, 289 insertions(+), 32 deletions(-)
 create mode 100644 
hbase-oss/src/test/java/org/apache/hadoop/hbase/oss/TestZNodeCleanup.java
 copy hbase-oss/src/{main/java/org/apache/hadoop/hbase/oss/Constants.java => 
test/java/org/apache/hadoop/hbase/oss/sync/TestZKTreeLockManager.java} (51%)


[hbase-filesystem] 01/02: HBASE-26453 Correct the behavior of isBeneath to not consider paths which share a name prefix as beneath one another.

2021-11-17 Thread elserj
This is an automated email from the ASF dual-hosted git repository.

elserj pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hbase-filesystem.git

commit 0aaacf63cd784566a1f98555839e068ac6a0e8d0
Author: Josh Elser 
AuthorDate: Fri Nov 12 12:57:07 2021 -0500

HBASE-26453 Correct the behavior of isBeneath to not consider paths which 
share a name prefix as beneath one another.

The current implementation of isBeneath fails when given paths of the
form '/foo' and '/foobar' (returning that '/foobar' is beneath '/foo').

Because this method returns incorrect values, it causes Curator mutexes
to be removed and znodes to be removed while they were potentially held.

Signed-off-by: Wellington Chevreuil 
---
 .../hadoop/hbase/oss/sync/ZKTreeLockManager.java   | 29 ---
 .../hbase/oss/sync/TestZKTreeLockManager.java  | 43 ++
 2 files changed, 67 insertions(+), 5 deletions(-)

diff --git 
a/hbase-oss/src/main/java/org/apache/hadoop/hbase/oss/sync/ZKTreeLockManager.java
 
b/hbase-oss/src/main/java/org/apache/hadoop/hbase/oss/sync/ZKTreeLockManager.java
index 0b8f819..549d4c1 100644
--- 
a/hbase-oss/src/main/java/org/apache/hadoop/hbase/oss/sync/ZKTreeLockManager.java
+++ 
b/hbase-oss/src/main/java/org/apache/hadoop/hbase/oss/sync/ZKTreeLockManager.java
@@ -241,7 +241,7 @@ public class ZKTreeLockManager extends TreeLockManager {
 }
   }
 
-  private synchronized void removeInMemoryLocks(Path p) {
+  synchronized void removeInMemoryLocks(Path p) {
 Iterator> iter = 
lockCache.entrySet().iterator();
 while (iter.hasNext()) {
   Entry entry = iter.next();
@@ -252,12 +252,31 @@ public class ZKTreeLockManager extends TreeLockManager {
 }
   }
 
-  private boolean isBeneath(Path parent, Path other) {
-if (parent.equals(other)) {
+  /**
+   * Returns true iff the given path is contained beneath the parent path.
+   *
+   * Specifically, this method will return true if the given path is a 
sub-directory
+   * of the parent or a file in the directory represented by the parent. This 
method
+   * returns false if the parent and the given path are the same. 
+   */
+  boolean isBeneath(Path parent, Path given) {
+if (parent.equals(given)) {
+  return false;
+}
+String parentPathStr = parent.toString();
+String givenPathStr = given.toString();
+int offset = givenPathStr.indexOf(parentPathStr);
+// Is the given path fully contained in some path beneath the parent.
+if (0 != offset) {
   return false;
 }
-// Is `other` fully contained in some path beneath the parent.
-return 0 == other.toString().indexOf(parent.toString());
+// The given path is a substring of the parent path. It might share a 
common name (e.g. /foo and /foo1)
+// or it might be a subdirectory or file in the parent (e.g. /foo and 
/foo/bar)
+String givenRemainder = givenPathStr.substring(parentPathStr.length());
+// If the remainder of the given path starts with a '/', then it's contained
+// beneath the parent. If there are additional characters, the given path
+// simply shares a name prefix with the file/dir represented by the parent.
+return givenRemainder.startsWith(Path.SEPARATOR);
   }
 
   private boolean writeLockBelow(Path p, int level, int maxLevel) throws 
IOException {
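
A standalone restatement of the corrected check, runnable on its own, with the
cases from the commit message (assumes normalized, '/'-separated paths with no
trailing slash):

    public class IsBeneathSketch {
      static boolean isBeneath(String parent, String given) {
        if (parent.equals(given) || !given.startsWith(parent)) {
          return false;
        }
        // Only a real path separator makes it a child; "/foobar" is not under "/foo".
        return given.substring(parent.length()).startsWith("/");
      }

      public static void main(String[] args) {
        System.out.println(isBeneath("/foo", "/foo/bar")); // true
        System.out.println(isBeneath("/foo", "/foobar"));  // false
        System.out.println(isBeneath("/foo", "/foo"));     // false
      }
    }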
diff --git 
a/hbase-oss/src/test/java/org/apache/hadoop/hbase/oss/sync/TestZKTreeLockManager.java
 
b/hbase-oss/src/test/java/org/apache/hadoop/hbase/oss/sync/TestZKTreeLockManager.java
new file mode 100644
index 000..075f951
--- /dev/null
+++ 
b/hbase-oss/src/test/java/org/apache/hadoop/hbase/oss/sync/TestZKTreeLockManager.java
@@ -0,0 +1,43 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.oss.sync;
+
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+import org.apache.hadoop.fs.Path;
+import org.junit.Before;
+import org.junit.Test;
+
+public class TestZKTreeLockManager {
+  ZKTreeLockManager manager;
+
+  @Before
+  public void setup() {
+this.manager = new ZKTreeLockManager();
+  }
+
+  @Test
+  public void testIsBenea

[hbase] branch branch-2.4 updated: HBASE-26267 Don't try to recover WALs from a WAL dir which doesn't exist (#3679)

2021-11-16 Thread elserj
This is an automated email from the ASF dual-hosted git repository.

elserj pushed a commit to branch branch-2.4
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2.4 by this push:
 new 7ce597d  HBASE-26267 Don't try to recover WALs from a WAL dir which 
doesn't exist (#3679)
7ce597d is described below

commit 7ce597d34f8ccf3fbc2fa713cac4c0ff32c295ad
Author: Josh Elser 
AuthorDate: Tue Nov 16 16:26:55 2021 -0500

HBASE-26267 Don't try to recover WALs from a WAL dir which doesn't exist 
(#3679)

We currently cause an error to be thrown by trying to list a
non-existent directory. We see that the master region directory exists
on the filesystem, but forget to make sure that the master region's WAL
directory also exists before we try to list it.
---
 .../hadoop/hbase/master/region/MasterRegion.java   | 29 +--
 .../hbase/master/region/MasterRegionTestBase.java  |  8 ++
 .../master/region/TestMasterRegionWALRecovery.java | 96 ++
 3 files changed, 128 insertions(+), 5 deletions(-)

diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/region/MasterRegion.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/region/MasterRegion.java
index c966637..dc1aa3c 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/region/MasterRegion.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/region/MasterRegion.java
@@ -222,7 +222,31 @@ public final class MasterRegion {
 if (!walFs.exists(replayEditsDir) && !walFs.mkdirs(replayEditsDir)) {
   throw new IOException("Failed to create replay directory: " + 
replayEditsDir);
 }
+
+// Replay any WALs for the Master Region before opening it.
 Path walsDir = new Path(walRootDir, HREGION_LOGDIR_NAME);
+// In open(...), we expect that the WAL directory for the MasterRegion to 
already exist.
+// This is in contrast to bootstrap() where we create the MasterRegion 
data and WAL dir.
+// However, it's possible that users directly remove the WAL directory. We 
expect walsDir
+// to always exist in normal situations, but we should guard against users 
changing the
+// filesystem outside of HBase's line of sight.
+if (walFs.exists(walsDir)) {
+  replayWALs(conf, walFs, walRootDir, walsDir, regionInfo, serverName, 
replayEditsDir);
+} else {
+  LOG.error("UNEXPECTED: WAL directory for MasterRegion is missing."
+  + " {} is unexpectedly missing.", walsDir);
+}
+
+// Create a new WAL
+WAL wal = createWAL(walFactory, walRoller, serverName, walFs, walRootDir, 
regionInfo);
+conf.set(HRegion.SPECIAL_RECOVERED_EDITS_DIR,
+  replayEditsDir.makeQualified(walFs.getUri(), 
walFs.getWorkingDirectory()).toString());
+return HRegion.openHRegionFromTableDir(conf, fs, tableDir, regionInfo, td, 
wal, null, null);
+  }
+
+  private static void replayWALs(Configuration conf, FileSystem walFs, Path 
walRootDir,
+  Path walsDir, RegionInfo regionInfo, String serverName, Path 
replayEditsDir)
+  throws IOException {
 for (FileStatus walDir : walFs.listStatus(walsDir)) {
   if (!walDir.isDirectory()) {
 continue;
@@ -256,11 +280,6 @@ public final class MasterRegion {
   LOG.info("Delete empty local region wal dir {}", deadWALDir);
   walFs.delete(deadWALDir, true);
 }
-
-WAL wal = createWAL(walFactory, walRoller, serverName, walFs, walRootDir, 
regionInfo);
-conf.set(HRegion.SPECIAL_RECOVERED_EDITS_DIR,
-  replayEditsDir.makeQualified(walFs.getUri(), 
walFs.getWorkingDirectory()).toString());
-return HRegion.openHRegionFromTableDir(conf, fs, tableDir, regionInfo, td, 
wal, null, null);
   }
 
   public static MasterRegion create(MasterRegionParams params) throws 
IOException {
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/region/MasterRegionTestBase.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/region/MasterRegionTestBase.java
index 9082b1d..3f33abe 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/region/MasterRegionTestBase.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/region/MasterRegionTestBase.java
@@ -77,6 +77,14 @@ public class MasterRegionTestBase {
 htu.getConfiguration().setBoolean(MemStoreLAB.USEMSLAB_KEY, false);
 // Runs on local filesystem. Test does not need sync. Turn off checks.
 
htu.getConfiguration().setBoolean(CommonFSUtils.UNSAFE_STREAM_CAPABILITY_ENFORCE,
 false);
+
+createMasterRegion();
+  }
+
+  /**
+   * Creates a new MasterRegion using an existing {@code htu} on this class.
+   */
+  protected void createMasterRegion() throws IOException {
 configure(htu.getConfiguration());
 choreService = new ChoreService(getClass().getSimpleName());
 cleanerPool = new DirScanPool(htu.getConfiguration());
dif

[hbase] branch branch-2 updated: HBASE-26267 Don't try to recover WALs from a WAL dir which doesn't exist (#3679)

2021-11-16 Thread elserj
This is an automated email from the ASF dual-hosted git repository.

elserj pushed a commit to branch branch-2
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2 by this push:
 new 4a8da47  HBASE-26267 Don't try to recover WALs from a WAL dir which 
doesn't exist (#3679)
4a8da47 is described below

commit 4a8da47f74ede9dd6df7b0066a61416ef9f8b803
Author: Josh Elser 
AuthorDate: Tue Nov 16 16:26:55 2021 -0500

HBASE-26267 Don't try to recover WALs from a WAL dir which doesn't exist 
(#3679)

We currently cause an error to be thrown by trying to list a
non-existent directory. We see that the master region directory exists
on the filesystem, but forget to make sure that the master region's WAL
directory also exists before we try to list it.
---
 .../hadoop/hbase/master/region/MasterRegion.java   | 29 +--
 .../hbase/master/region/MasterRegionTestBase.java  |  8 ++
 .../master/region/TestMasterRegionWALRecovery.java | 96 ++
 3 files changed, 128 insertions(+), 5 deletions(-)

diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/region/MasterRegion.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/region/MasterRegion.java
index c789738..71950bd 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/region/MasterRegion.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/region/MasterRegion.java
@@ -227,7 +227,31 @@ public final class MasterRegion {
 if (!walFs.exists(replayEditsDir) && !walFs.mkdirs(replayEditsDir)) {
   throw new IOException("Failed to create replay directory: " + 
replayEditsDir);
 }
+
+// Replay any WALs for the Master Region before opening it.
 Path walsDir = new Path(walRootDir, HREGION_LOGDIR_NAME);
+// In open(...), we expect that the WAL directory for the MasterRegion to 
already exist.
+// This is in contrast to bootstrap() where we create the MasterRegion 
data and WAL dir.
+// However, it's possible that users directly remove the WAL directory. We 
expect walsDir
+// to always exist in normal situations, but we should guard against users 
changing the
+// filesystem outside of HBase's line of sight.
+if (walFs.exists(walsDir)) {
+  replayWALs(conf, walFs, walRootDir, walsDir, regionInfo, serverName, 
replayEditsDir);
+} else {
+  LOG.error("UNEXPECTED: WAL directory for MasterRegion is missing."
+  + " {} is unexpectedly missing.", walsDir);
+}
+
+// Create a new WAL
+WAL wal = createWAL(walFactory, walRoller, serverName, walFs, walRootDir, 
regionInfo);
+conf.set(HRegion.SPECIAL_RECOVERED_EDITS_DIR,
+  replayEditsDir.makeQualified(walFs.getUri(), 
walFs.getWorkingDirectory()).toString());
+return HRegion.openHRegionFromTableDir(conf, fs, tableDir, regionInfo, td, 
wal, null, null);
+  }
+
+  private static void replayWALs(Configuration conf, FileSystem walFs, Path 
walRootDir,
+  Path walsDir, RegionInfo regionInfo, String serverName, Path 
replayEditsDir)
+  throws IOException {
 for (FileStatus walDir : walFs.listStatus(walsDir)) {
   if (!walDir.isDirectory()) {
 continue;
@@ -261,11 +285,6 @@ public final class MasterRegion {
   LOG.info("Delete empty local region wal dir {}", deadWALDir);
   walFs.delete(deadWALDir, true);
 }
-
-WAL wal = createWAL(walFactory, walRoller, serverName, walFs, walRootDir, 
regionInfo);
-conf.set(HRegion.SPECIAL_RECOVERED_EDITS_DIR,
-  replayEditsDir.makeQualified(walFs.getUri(), 
walFs.getWorkingDirectory()).toString());
-return HRegion.openHRegionFromTableDir(conf, fs, tableDir, regionInfo, td, 
wal, null, null);
   }
 
   public static MasterRegion create(MasterRegionParams params) throws 
IOException {
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/region/MasterRegionTestBase.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/region/MasterRegionTestBase.java
index 45102dd..27a4b7a 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/region/MasterRegionTestBase.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/region/MasterRegionTestBase.java
@@ -78,6 +78,14 @@ public class MasterRegionTestBase {
 htu.getConfiguration().setBoolean(MemStoreLAB.USEMSLAB_KEY, false);
 // Runs on local filesystem. Test does not need sync. Turn off checks.
 
htu.getConfiguration().setBoolean(CommonFSUtils.UNSAFE_STREAM_CAPABILITY_ENFORCE,
 false);
+
+createMasterRegion();
+  }
+
+  /**
+   * Creates a new MasterRegion using an existing {@code htu} on this class.
+   */
+  protected void createMasterRegion() throws IOException {
 configure(htu.getConfiguration());
 choreService = new ChoreService(getClass().getSimpleName());
 cleanerPool = new DirScanPool(htu.getConfiguration());
dif

[hbase] branch master updated: HBASE-26267 Don't try to recover WALs from a WAL dir which doesn't exist (#3679)

2021-11-16 Thread elserj
This is an automated email from the ASF dual-hosted git repository.

elserj pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/master by this push:
 new 1c47f80  HBASE-26267 Don't try to recover WALs from a WAL dir which 
doesn't exist (#3679)
1c47f80 is described below

commit 1c47f80d83f38f69f0b1c31baad2766a17d0b6d0
Author: Josh Elser 
AuthorDate: Tue Nov 16 16:26:55 2021 -0500

HBASE-26267 Don't try to recover WALs from a WAL dir which doesn't exist 
(#3679)

We currently cause an error to be thrown by trying to list a
non-existent directory. We see that the master region directory exists
on the filesystem, but forget to make sure that the master region's WAL
directory also exists before we try to list it.
---
 .../hadoop/hbase/master/region/MasterRegion.java   | 29 +--
 .../hbase/master/region/MasterRegionTestBase.java  |  8 ++
 .../master/region/TestMasterRegionWALRecovery.java | 96 ++
 3 files changed, 128 insertions(+), 5 deletions(-)

diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/region/MasterRegion.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/region/MasterRegion.java
index ad885ea..a794315 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/region/MasterRegion.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/region/MasterRegion.java
@@ -227,7 +227,31 @@ public final class MasterRegion {
 if (!walFs.exists(replayEditsDir) && !walFs.mkdirs(replayEditsDir)) {
   throw new IOException("Failed to create replay directory: " + 
replayEditsDir);
 }
+
+// Replay any WALs for the Master Region before opening it.
 Path walsDir = new Path(walRootDir, HREGION_LOGDIR_NAME);
+// In open(...), we expect that the WAL directory for the MasterRegion to 
already exist.
+// This is in contrast to bootstrap() where we create the MasterRegion 
data and WAL dir.
+// However, it's possible that users directly remove the WAL directory. We 
expect walsDir
+// to always exist in normal situations, but we should guard against users 
changing the
+// filesystem outside of HBase's line of sight.
+if (walFs.exists(walsDir)) {
+  replayWALs(conf, walFs, walRootDir, walsDir, regionInfo, serverName, 
replayEditsDir);
+} else {
+  LOG.error("UNEXPECTED: WAL directory for MasterRegion is missing."
+  + " {} is unexpectedly missing.", walsDir);
+}
+
+// Create a new WAL
+WAL wal = createWAL(walFactory, walRoller, serverName, walFs, walRootDir, 
regionInfo);
+conf.set(HRegion.SPECIAL_RECOVERED_EDITS_DIR,
+  replayEditsDir.makeQualified(walFs.getUri(), 
walFs.getWorkingDirectory()).toString());
+return HRegion.openHRegionFromTableDir(conf, fs, tableDir, regionInfo, td, 
wal, null, null);
+  }
+
+  private static void replayWALs(Configuration conf, FileSystem walFs, Path 
walRootDir,
+  Path walsDir, RegionInfo regionInfo, String serverName, Path 
replayEditsDir)
+  throws IOException {
 for (FileStatus walDir : walFs.listStatus(walsDir)) {
   if (!walDir.isDirectory()) {
 continue;
@@ -261,11 +285,6 @@ public final class MasterRegion {
   LOG.info("Delete empty local region wal dir {}", deadWALDir);
   walFs.delete(deadWALDir, true);
 }
-
-WAL wal = createWAL(walFactory, walRoller, serverName, walFs, walRootDir, 
regionInfo);
-conf.set(HRegion.SPECIAL_RECOVERED_EDITS_DIR,
-  replayEditsDir.makeQualified(walFs.getUri(), 
walFs.getWorkingDirectory()).toString());
-return HRegion.openHRegionFromTableDir(conf, fs, tableDir, regionInfo, td, 
wal, null, null);
   }
 
   public static MasterRegion create(MasterRegionParams params) throws 
IOException {
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/region/MasterRegionTestBase.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/region/MasterRegionTestBase.java
index 721e4d1..8080a31 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/region/MasterRegionTestBase.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/region/MasterRegionTestBase.java
@@ -80,6 +80,14 @@ public class MasterRegionTestBase {
 htu.getConfiguration().setBoolean(MemStoreLAB.USEMSLAB_KEY, false);
 // Runs on local filesystem. Test does not need sync. Turn off checks.
 
htu.getConfiguration().setBoolean(CommonFSUtils.UNSAFE_STREAM_CAPABILITY_ENFORCE,
 false);
+
+createMasterRegion();
+  }
+
+  /**
+   * Creates a new MasterRegion using an existing {@code htu} on this class.
+   */
+  protected void createMasterRegion() throws IOException {
 configure(htu.getConfiguration());
 choreService = new ChoreService(getClass().getSimpleName());
 hfileCleanerPool = 
DirScanPool.getHFileCleanerScanPool(htu.get

[hbase] branch master updated: HBASE-26350 Add a DEBUG when we fail the SASL handshake

2021-10-14 Thread elserj
This is an automated email from the ASF dual-hosted git repository.

elserj pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/master by this push:
 new ad7d698  HBASE-26350 Add a DEBUG when we fail the SASL handshake
ad7d698 is described below

commit ad7d69862203d4204b88df244b6299a5c6a1e792
Author: Josh Elser 
AuthorDate: Mon Oct 11 17:01:06 2021 -0400

HBASE-26350 Add a DEBUG when we fail the SASL handshake

Closes #3743

Signed-off-by: Pankaj 
Signed-off-by: Tak Lon (Stephen) Wu 
Signed-off-by: Wellington Chevreuil 
---
 .../src/main/java/org/apache/hadoop/hbase/ipc/ServerRpcConnection.java   | 1 +
 1 file changed, 1 insertion(+)

diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerRpcConnection.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerRpcConnection.java
index b6293e2..4ebc9fa 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerRpcConnection.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerRpcConnection.java
@@ -372,6 +372,7 @@ abstract class ServerRpcConnection implements Closeable {
 replyToken = saslServer.evaluateResponse(saslToken.hasArray()?
 saslToken.array() : saslToken.toBytes());
   } catch (IOException e) {
+RpcServer.LOG.debug("Failed to execute SASL handshake", e);
 IOException sendToClient = e;
 Throwable cause = e;
 while (cause != null) {


[hbase] branch branch-2.4 updated: HBASE-26350 Add a DEBUG when we fail the SASL handshake

2021-10-14 Thread elserj
This is an automated email from the ASF dual-hosted git repository.

elserj pushed a commit to branch branch-2.4
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2.4 by this push:
 new a372d43  HBASE-26350 Add a DEBUG when we fail the SASL handshake
a372d43 is described below

commit a372d437b3fd82b2965620b1eb31c1b24cd0dd51
Author: Josh Elser 
AuthorDate: Mon Oct 11 17:01:06 2021 -0400

HBASE-26350 Add a DEBUG when we fail the SASL handshake

Closes #3743

Signed-off-by: Pankaj 
Signed-off-by: Tak Lon (Stephen) Wu 
Signed-off-by: Wellington Chevreuil 
---
 .../src/main/java/org/apache/hadoop/hbase/ipc/ServerRpcConnection.java   | 1 +
 1 file changed, 1 insertion(+)

diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerRpcConnection.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerRpcConnection.java
index 29ce30b..03478e4 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerRpcConnection.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerRpcConnection.java
@@ -364,6 +364,7 @@ abstract class ServerRpcConnection implements Closeable {
 replyToken = saslServer.evaluateResponse(saslToken.hasArray()?
 saslToken.array() : saslToken.toBytes());
   } catch (IOException e) {
+RpcServer.LOG.debug("Failed to execute SASL handshake", e);
 IOException sendToClient = e;
 Throwable cause = e;
 while (cause != null) {


[hbase] branch branch-2.3 updated: HBASE-26350 Add a DEBUG when we fail the SASL handshake

2021-10-14 Thread elserj
This is an automated email from the ASF dual-hosted git repository.

elserj pushed a commit to branch branch-2.3
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2.3 by this push:
 new 34f9b3e  HBASE-26350 Add a DEBUG when we fail the SASL handshake
34f9b3e is described below

commit 34f9b3e85820df92131809042865430a024a35f5
Author: Josh Elser 
AuthorDate: Mon Oct 11 17:01:06 2021 -0400

HBASE-26350 Add a DEBUG when we fail the SASL handshake

Closes #3743

Signed-off-by: Pankaj 
Signed-off-by: Tak Lon (Stephen) Wu 
Signed-off-by: Wellington Chevreuil 
---
 .../src/main/java/org/apache/hadoop/hbase/ipc/ServerRpcConnection.java   | 1 +
 1 file changed, 1 insertion(+)

diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerRpcConnection.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerRpcConnection.java
index 29ce30b..03478e4 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerRpcConnection.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerRpcConnection.java
@@ -364,6 +364,7 @@ abstract class ServerRpcConnection implements Closeable {
 replyToken = saslServer.evaluateResponse(saslToken.hasArray()?
 saslToken.array() : saslToken.toBytes());
   } catch (IOException e) {
+RpcServer.LOG.debug("Failed to execute SASL handshake", e);
 IOException sendToClient = e;
 Throwable cause = e;
 while (cause != null) {


[hbase] branch branch-2.2 updated: HBASE-26350 Add a DEBUG when we fail the SASL handshake

2021-10-14 Thread elserj
This is an automated email from the ASF dual-hosted git repository.

elserj pushed a commit to branch branch-2.2
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2.2 by this push:
 new 3a7ad23  HBASE-26350 Add a DEBUG when we fail the SASL handshake
3a7ad23 is described below

commit 3a7ad2375975918313feecc90d3954dba2c826a2
Author: Josh Elser 
AuthorDate: Mon Oct 11 17:01:06 2021 -0400

HBASE-26350 Add a DEBUG when we fail the SASL handshake

Closes #3743

Signed-off-by: Pankaj 
Signed-off-by: Tak Lon (Stephen) Wu 
Signed-off-by: Wellington Chevreuil 
---
 .../src/main/java/org/apache/hadoop/hbase/ipc/ServerRpcConnection.java   | 1 +
 1 file changed, 1 insertion(+)

diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerRpcConnection.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerRpcConnection.java
index c8569c3..c86d3d9 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerRpcConnection.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerRpcConnection.java
@@ -371,6 +371,7 @@ abstract class ServerRpcConnection implements Closeable {
 replyToken = saslServer.evaluateResponse(saslToken.hasArray()?
 saslToken.array() : saslToken.toBytes());
   } catch (IOException e) {
+RpcServer.LOG.debug("Failed to execute SASL handshake", e);
 IOException sendToClient = e;
 Throwable cause = e;
 while (cause != null) {


[hbase] branch branch-2 updated: HBASE-26350 Add a DEBUG when we fail the SASL handshake

2021-10-14 Thread elserj
This is an automated email from the ASF dual-hosted git repository.

elserj pushed a commit to branch branch-2
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2 by this push:
 new 45b09aa  HBASE-26350 Add a DEBUG when we fail the SASL handshake
45b09aa is described below

commit 45b09aa4d7b844cb723023bf2bdec05e861760f1
Author: Josh Elser 
AuthorDate: Mon Oct 11 17:01:06 2021 -0400

HBASE-26350 Add a DEBUG when we fail the SASL handshake

Closes #3743

Signed-off-by: Pankaj 
Signed-off-by: Tak Lon (Stephen) Wu 
Signed-off-by: Wellington Chevreuil 
---
 .../src/main/java/org/apache/hadoop/hbase/ipc/ServerRpcConnection.java   | 1 +
 1 file changed, 1 insertion(+)

diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerRpcConnection.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerRpcConnection.java
index 208ec8b..08c1542 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerRpcConnection.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerRpcConnection.java
@@ -372,6 +372,7 @@ abstract class ServerRpcConnection implements Closeable {
 replyToken = saslServer.evaluateResponse(saslToken.hasArray()?
 saslToken.array() : saslToken.toBytes());
   } catch (IOException e) {
+RpcServer.LOG.debug("Failed to execute SASL handshake", e);
 IOException sendToClient = e;
 Throwable cause = e;
 while (cause != null) {


[hbase-filesystem] branch master updated: HBASE-25900 Hadoop 3.2 and 3.3 support (#25)

2021-10-05 Thread elserj
This is an automated email from the ASF dual-hosted git repository.

elserj pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hbase-filesystem.git


The following commit(s) were added to refs/heads/master by this push:
 new 1669358  HBASE-25900 Hadoop 3.2 and 3.3 support (#25)
1669358 is described below

commit 16693581c356c695f91bbe45a28bcb62789ca4c0
Author: Josh Elser 
AuthorDate: Tue Oct 5 12:51:58 2021 -0400

HBASE-25900 Hadoop 3.2 and 3.3 support (#25)

Build on top of SteveL's original changes. Added indirection so
that the correct S3ClientFactory can be included at test-time. Provides
s3a.xml contracts for each version to reflect what actually works. Skips
the new tests which we know would fail on the older 
Co-authored-by: Peter Somogyi 
Signed-off-by: Peter Somogyi 
---
 hadoop-testutils/pom.xml   |  63 +
 .../org/apache/hadoop/hbase/oss/EmbeddedS3.java|  69 +-
 hadoop3-2-testutils/pom.xml|  56 
 .../hbase/oss/Hadoop32EmbeddedS3ClientFactory.java |  50 +++
 hadoop3-3-testutils/pom.xml|  56 
 .../hbase/oss/Hadoop33EmbeddedS3ClientFactory.java |  63 +
 hbase-oss/pom.xml  |  74 ++
 .../hbase/oss/HBaseObjectStoreSemanticsTest.java   |   4 +-
 .../org/apache/hadoop/hbase/oss/TestUtils.java | 139 ++-
 .../hadoop/hbase/oss/contract/HBOSSContract.java   |   4 +-
 .../hbase/oss/contract/TestHBOSSContract.java  |  48 ++-
 .../oss/contract/TestHBOSSContractCreate.java  |  21 ++-
 .../oss/contract/TestHBOSSContractRenameS3A.java   |   7 +
 .../resources/contract/{ => hadoop-3.2}/s3a.xml|  10 +-
 .../resources/contract/{ => hadoop-3.3}/s3a.xml|  20 ++-
 pom.xml| 149 ++---
 16 files changed, 641 insertions(+), 192 deletions(-)

diff --git a/hadoop-testutils/pom.xml b/hadoop-testutils/pom.xml
new file mode 100644
index 000..53d45b0
--- /dev/null
+++ b/hadoop-testutils/pom.xml
@@ -0,0 +1,63 @@
+
+
+http://maven.apache.org/POM/4.0.0; 
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance; 
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 
https://maven.apache.org/xsd/maven-4.0.0.xsd;>
+  4.0.0
+  
+org.apache.hbase.filesystem
+hbase-filesystem
+1.0.0-alpha2-SNAPSHOT
+  
+  hadoop-testutils
+  Common test utilities across Hadoop versions
+   
+
+  org.apache.commons
+  commons-lang3
+  ${commons-lang3.version}
+
+
+  org.slf4j
+  slf4j-api
+  ${slf4j.version}
+
+
+  org.apache.hadoop
+  hadoop-common
+  ${hadoop.version}
+  
+
+  
+  com.google.code.findbugs
+  jsr305
+
+  
+
+
+  org.apache.hadoop
+  hadoop-aws
+  ${hadoop.version}
+
+
+  com.amazonaws
+  aws-java-sdk-bundle
+  ${aws-java-sdk.version}
+
+
+  org.apache.yetus
+  audience-annotations
+  ${audience-annotations.version}
+
+  
+
\ No newline at end of file
diff --git 
a/hbase-oss/src/test/java/org/apache/hadoop/hbase/oss/EmbeddedS3.java 
b/hadoop-testutils/src/main/java/org/apache/hadoop/hbase/oss/EmbeddedS3.java
similarity index 82%
rename from hbase-oss/src/test/java/org/apache/hadoop/hbase/oss/EmbeddedS3.java
rename to 
hadoop-testutils/src/main/java/org/apache/hadoop/hbase/oss/EmbeddedS3.java
index f5411b7..2284a6d 100644
--- a/hbase-oss/src/test/java/org/apache/hadoop/hbase/oss/EmbeddedS3.java
+++ b/hadoop-testutils/src/main/java/org/apache/hadoop/hbase/oss/EmbeddedS3.java
@@ -15,14 +15,11 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-
 package org.apache.hadoop.hbase.oss;
 
 import com.amazonaws.AmazonServiceException;
 import com.amazonaws.SdkClientException;
-import com.amazonaws.auth.AWSCredentialsProvider;
 import com.amazonaws.services.s3.AbstractAmazonS3;
-import com.amazonaws.services.s3.AmazonS3;
 import com.amazonaws.services.s3.model.Bucket;
 import com.amazonaws.services.s3.model.CopyObjectRequest;
 import com.amazonaws.services.s3.model.CopyObjectResult;
@@ -43,84 +40,26 @@ import com.amazonaws.services.s3.model.S3ObjectSummary;
 import java.io.File;
 import java.io.InputStream;
 import java.io.IOException;
-import java.net.URI;
 import java.nio.charset.Charset;
 import java.util.ArrayList;
-import java.util.HashMap;
 import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
+import java.util.concurrent.ConcurrentHashMap;
+
 import org.apache.commons.io.FileUtils;
 import org.apache.commons.io.IOUtils;
-import org.apache.commons.lang3.StringUtils;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.s3a.S3ClientFactory;
-import org.apache.hadoop.fs.s3a.s3guard.LocalMet

[hbase] branch branch-2.3 updated: HBASE-26273 Force ReadType.STREAM when the user does not explicitly set a ReadType on the Scan for a Snapshot-based Job

2021-09-14 Thread elserj
This is an automated email from the ASF dual-hosted git repository.

elserj pushed a commit to branch branch-2.3
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2.3 by this push:
 new 565d419  HBASE-26273 Force ReadType.STREAM when the user does not 
explicitly set a ReadType on the Scan for a Snapshot-based Job
565d419 is described below

commit 565d4197f531280bf00d5677c5fd27e93cebc53e
Author: Josh Elser 
AuthorDate: Fri Sep 10 16:24:13 2021 -0400

HBASE-26273 Force ReadType.STREAM when the user does not explicitly set a 
ReadType on the Scan for a Snapshot-based Job

HBase 2 moved over Scans to use PREAD by default instead of STREAM like
HBase 1. In the context of a MapReduce job, we can generally expect that
clients using the InputFormat (batch job) would be reading most of the
data for a job. Cater to them, but still give users who want PREAD the
ability to do so.

Signed-off-by: Duo Zhang 
Signed-off-by: Tak Lon (Stephen) Wu 
---
 .../mapreduce/TableSnapshotInputFormatImpl.java| 18 
 .../mapreduce/TestTableSnapshotInputFormat.java| 33 ++
 2 files changed, 51 insertions(+)

diff --git 
a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormatImpl.java
 
b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormatImpl.java
index c3f05f4..f83a9b9 100644
--- 
a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormatImpl.java
+++ 
b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormatImpl.java
@@ -41,6 +41,7 @@ import org.apache.hadoop.hbase.client.IsolationLevel;
 import org.apache.hadoop.hbase.client.RegionLocator;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.client.Scan.ReadType;
 import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.regionserver.HRegion;
@@ -129,6 +130,14 @@ public class TableSnapshotInputFormatImpl {
   public static final boolean 
SNAPSHOT_INPUTFORMAT_SCAN_METRICS_ENABLED_DEFAULT = true;
 
   /**
+   * The {@link ReadType} which should be set on the {@link Scan} to read the 
HBase Snapshot,
+   * default STREAM.
+   */
+  public static final String SNAPSHOT_INPUTFORMAT_SCANNER_READTYPE =
+  "hbase.TableSnapshotInputFormat.scanner.readtype";
+  public static final ReadType SNAPSHOT_INPUTFORMAT_SCANNER_READTYPE_DEFAULT = 
ReadType.STREAM;
+
+  /**
* Implementation class for InputSplit logic common between mapred and 
mapreduce.
*/
   public static class InputSplit implements Writable {
@@ -382,6 +391,15 @@ public class TableSnapshotInputFormatImpl {
 } else {
   throw new IllegalArgumentException("Unable to create scan");
 }
+
+if (scan.getReadType() == ReadType.DEFAULT) {
+  LOG.info("Provided Scan has DEFAULT ReadType,"
+  + " updating STREAM for Snapshot-based InputFormat");
+  // Update the "DEFAULT" ReadType to be "STREAM" to try to improve the 
default case.
+  scan.setReadType(conf.getEnum(SNAPSHOT_INPUTFORMAT_SCANNER_READTYPE,
+  SNAPSHOT_INPUTFORMAT_SCANNER_READTYPE_DEFAULT));
+}
+
 return scan;
   }
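
A short sketch of how a job opts back into PREAD, either per-Scan (an explicit
ReadType is never rewritten) or via the new property for Scans left at
DEFAULT:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.client.Scan;

    public class SnapshotReadTypeExample {
      public static void main(String[] args) {
        // Explicit ReadType on the Scan: respected as-is by the InputFormat.
        Scan scan = new Scan();
        scan.setReadType(Scan.ReadType.PREAD);

        // Scans left at ReadType.DEFAULT pick up this property instead:
        Configuration conf = new Configuration();
        conf.setEnum("hbase.TableSnapshotInputFormat.scanner.readtype", Scan.ReadType.PREAD);
      }
    }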
 
diff --git 
a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableSnapshotInputFormat.java
 
b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableSnapshotInputFormat.java
index 0820f3b..b1a07f0 100644
--- 
a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableSnapshotInputFormat.java
+++ 
b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableSnapshotInputFormat.java
@@ -22,6 +22,8 @@ import static 
org.apache.hadoop.hbase.mapreduce.TableSnapshotInputFormatImpl.SNA
 import static 
org.apache.hadoop.hbase.mapreduce.TableSnapshotInputFormatImpl.SNAPSHOT_INPUTFORMAT_ROW_LIMIT_PER_INPUTSPLIT;
 import static 
org.apache.hadoop.hbase.mapreduce.TableSnapshotInputFormatImpl.SNAPSHOT_INPUTFORMAT_LOCALITY_BY_REGION_LOCATION;
 import static 
org.apache.hadoop.hbase.mapreduce.TableSnapshotInputFormatImpl.SNAPSHOT_INPUTFORMAT_LOCALITY_BY_REGION_LOCATION_DEFAULT;
+import static 
org.apache.hadoop.hbase.mapreduce.TableSnapshotInputFormatImpl.SNAPSHOT_INPUTFORMAT_SCANNER_READTYPE;
+import static org.junit.Assert.assertEquals;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.when;
 
@@ -39,6 +41,7 @@ import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.client.Scan.ReadType;
 import org.apache.hadoop.hbase.client.Table;
 import org.

[hbase] branch branch-2.4 updated: HBASE-26273 Force ReadType.STREAM when the user does not explicitly set a ReadType on the Scan for a Snapshot-based Job

2021-09-13 Thread elserj
This is an automated email from the ASF dual-hosted git repository.

elserj pushed a commit to branch branch-2.4
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2.4 by this push:
 new 74eff79  HBASE-26273 Force ReadType.STREAM when the user does not 
explicitly set a ReadType on the Scan for a Snapshot-based Job
74eff79 is described below

commit 74eff79c274ccf40a275a2cf8d04bf8319f4fed5
Author: Josh Elser 
AuthorDate: Fri Sep 10 16:24:13 2021 -0400

HBASE-26273 Force ReadType.STREAM when the user does not explicitly set a 
ReadType on the Scan for a Snapshot-based Job

HBase 2 moved Scans over to PREAD by default, instead of STREAM as in
HBase 1. In the context of a MapReduce job, we can generally expect that
clients using the InputFormat (batch jobs) will read most of the data
for the job. Cater to them, but still give users who want PREAD the
ability to choose it.

Signed-off-by: Duo Zhang 
Signed-off-by: Tak Lon (Stephen) Wu 
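
A minimal sketch of the two ways a job can steer the ReadType after this
change; the configuration key and enum come from the patch below, while the
surrounding job setup is assumed:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Scan.ReadType;

public class SnapshotReadTypeExample {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();

    // Option 1: leave the Scan at ReadType.DEFAULT and steer the rewrite
    // through the new key; the InputFormat falls back to STREAM when unset.
    conf.setEnum("hbase.TableSnapshotInputFormat.scanner.readtype", ReadType.PREAD);

    // Option 2: set an explicit ReadType on the Scan; the InputFormat only
    // rewrites scans that are still at ReadType.DEFAULT.
    Scan scan = new Scan().setReadType(ReadType.PREAD);
  }
}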
---
 .../mapreduce/TableSnapshotInputFormatImpl.java| 18 
 .../mapreduce/TestTableSnapshotInputFormat.java| 33 ++
 2 files changed, 51 insertions(+)

diff --git 
a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormatImpl.java
 
b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormatImpl.java
index c3f05f4..f83a9b9 100644
--- 
a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormatImpl.java
+++ 
b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormatImpl.java
@@ -41,6 +41,7 @@ import org.apache.hadoop.hbase.client.IsolationLevel;
 import org.apache.hadoop.hbase.client.RegionLocator;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.client.Scan.ReadType;
 import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.regionserver.HRegion;
@@ -129,6 +130,14 @@ public class TableSnapshotInputFormatImpl {
   public static final boolean 
SNAPSHOT_INPUTFORMAT_SCAN_METRICS_ENABLED_DEFAULT = true;
 
   /**
+   * The {@link ReadType} which should be set on the {@link Scan} to read the 
HBase Snapshot,
+   * default STREAM.
+   */
+  public static final String SNAPSHOT_INPUTFORMAT_SCANNER_READTYPE =
+  "hbase.TableSnapshotInputFormat.scanner.readtype";
+  public static final ReadType SNAPSHOT_INPUTFORMAT_SCANNER_READTYPE_DEFAULT = 
ReadType.STREAM;
+
+  /**
* Implementation class for InputSplit logic common between mapred and 
mapreduce.
*/
   public static class InputSplit implements Writable {
@@ -382,6 +391,15 @@ public class TableSnapshotInputFormatImpl {
 } else {
   throw new IllegalArgumentException("Unable to create scan");
 }
+
+if (scan.getReadType() == ReadType.DEFAULT) {
+  LOG.info("Provided Scan has DEFAULT ReadType,"
+  + " updating STREAM for Snapshot-based InputFormat");
+  // Update the "DEFAULT" ReadType to be "STREAM" to try to improve the 
default case.
+  scan.setReadType(conf.getEnum(SNAPSHOT_INPUTFORMAT_SCANNER_READTYPE,
+  SNAPSHOT_INPUTFORMAT_SCANNER_READTYPE_DEFAULT));
+}
+
 return scan;
   }
 
diff --git 
a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableSnapshotInputFormat.java
 
b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableSnapshotInputFormat.java
index 0820f3b..b1a07f0 100644
--- 
a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableSnapshotInputFormat.java
+++ 
b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableSnapshotInputFormat.java
@@ -22,6 +22,8 @@ import static 
org.apache.hadoop.hbase.mapreduce.TableSnapshotInputFormatImpl.SNA
 import static 
org.apache.hadoop.hbase.mapreduce.TableSnapshotInputFormatImpl.SNAPSHOT_INPUTFORMAT_ROW_LIMIT_PER_INPUTSPLIT;
 import static 
org.apache.hadoop.hbase.mapreduce.TableSnapshotInputFormatImpl.SNAPSHOT_INPUTFORMAT_LOCALITY_BY_REGION_LOCATION;
 import static 
org.apache.hadoop.hbase.mapreduce.TableSnapshotInputFormatImpl.SNAPSHOT_INPUTFORMAT_LOCALITY_BY_REGION_LOCATION_DEFAULT;
+import static 
org.apache.hadoop.hbase.mapreduce.TableSnapshotInputFormatImpl.SNAPSHOT_INPUTFORMAT_SCANNER_READTYPE;
+import static org.junit.Assert.assertEquals;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.when;
 
@@ -39,6 +41,7 @@ import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.client.Scan.ReadType;
 import org.apache.hadoop.hbase.client.Table;
 import org.

[hbase] branch branch-2 updated: HBASE-26273 Force ReadType.STREAM when the user does not explicitly set a ReadType on the Scan for a Snapshot-based Job

2021-09-13 Thread elserj
This is an automated email from the ASF dual-hosted git repository.

elserj pushed a commit to branch branch-2
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2 by this push:
 new e8d6213  HBASE-26273 Force ReadType.STREAM when the user does not 
explicitly set a ReadType on the Scan for a Snapshot-based Job
e8d6213 is described below

commit e8d62139d5521ab5679988eef1837c44584c4f80
Author: Josh Elser 
AuthorDate: Fri Sep 10 16:24:13 2021 -0400

HBASE-26273 Force ReadType.STREAM when the user does not explicitly set a 
ReadType on the Scan for a Snapshot-based Job

HBase 2 moved Scans over to PREAD by default, instead of STREAM as in
HBase 1. In the context of a MapReduce job, we can generally expect that
clients using the InputFormat (batch jobs) will read most of the data
for the job. Cater to them, but still give users who want PREAD the
ability to choose it.

Signed-off-by: Duo Zhang 
Signed-off-by: Tak Lon (Stephen) Wu 
---
 .../mapreduce/TableSnapshotInputFormatImpl.java| 18 
 .../mapreduce/TestTableSnapshotInputFormat.java| 33 ++
 2 files changed, 51 insertions(+)

diff --git 
a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormatImpl.java
 
b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormatImpl.java
index c3f05f4..f83a9b9 100644
--- 
a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormatImpl.java
+++ 
b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormatImpl.java
@@ -41,6 +41,7 @@ import org.apache.hadoop.hbase.client.IsolationLevel;
 import org.apache.hadoop.hbase.client.RegionLocator;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.client.Scan.ReadType;
 import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.regionserver.HRegion;
@@ -129,6 +130,14 @@ public class TableSnapshotInputFormatImpl {
   public static final boolean 
SNAPSHOT_INPUTFORMAT_SCAN_METRICS_ENABLED_DEFAULT = true;
 
   /**
+   * The {@link ReadType} which should be set on the {@link Scan} to read the 
HBase Snapshot,
+   * default STREAM.
+   */
+  public static final String SNAPSHOT_INPUTFORMAT_SCANNER_READTYPE =
+  "hbase.TableSnapshotInputFormat.scanner.readtype";
+  public static final ReadType SNAPSHOT_INPUTFORMAT_SCANNER_READTYPE_DEFAULT = 
ReadType.STREAM;
+
+  /**
* Implementation class for InputSplit logic common between mapred and 
mapreduce.
*/
   public static class InputSplit implements Writable {
@@ -382,6 +391,15 @@ public class TableSnapshotInputFormatImpl {
 } else {
   throw new IllegalArgumentException("Unable to create scan");
 }
+
+if (scan.getReadType() == ReadType.DEFAULT) {
+  LOG.info("Provided Scan has DEFAULT ReadType,"
+  + " updating STREAM for Snapshot-based InputFormat");
+  // Update the "DEFAULT" ReadType to be "STREAM" to try to improve the 
default case.
+  scan.setReadType(conf.getEnum(SNAPSHOT_INPUTFORMAT_SCANNER_READTYPE,
+  SNAPSHOT_INPUTFORMAT_SCANNER_READTYPE_DEFAULT));
+}
+
 return scan;
   }
 
diff --git 
a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableSnapshotInputFormat.java
 
b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableSnapshotInputFormat.java
index 0820f3b..b1a07f0 100644
--- 
a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableSnapshotInputFormat.java
+++ 
b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableSnapshotInputFormat.java
@@ -22,6 +22,8 @@ import static 
org.apache.hadoop.hbase.mapreduce.TableSnapshotInputFormatImpl.SNA
 import static 
org.apache.hadoop.hbase.mapreduce.TableSnapshotInputFormatImpl.SNAPSHOT_INPUTFORMAT_ROW_LIMIT_PER_INPUTSPLIT;
 import static 
org.apache.hadoop.hbase.mapreduce.TableSnapshotInputFormatImpl.SNAPSHOT_INPUTFORMAT_LOCALITY_BY_REGION_LOCATION;
 import static 
org.apache.hadoop.hbase.mapreduce.TableSnapshotInputFormatImpl.SNAPSHOT_INPUTFORMAT_LOCALITY_BY_REGION_LOCATION_DEFAULT;
+import static 
org.apache.hadoop.hbase.mapreduce.TableSnapshotInputFormatImpl.SNAPSHOT_INPUTFORMAT_SCANNER_READTYPE;
+import static org.junit.Assert.assertEquals;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.when;
 
@@ -39,6 +41,7 @@ import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.client.Scan.ReadType;
 import org.apache.hadoop.hbase.client.Table;
 import org.

[hbase] branch master updated: HBASE-26273 Force ReadType.STREAM when the user does not explicitly set a ReadType on the Scan for a Snapshot-based Job

2021-09-13 Thread elserj
This is an automated email from the ASF dual-hosted git repository.

elserj pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/master by this push:
 new 86bc640  HBASE-26273 Force ReadType.STREAM when the user does not 
explicitly set a ReadType on the Scan for a Snapshot-based Job
86bc640 is described below

commit 86bc640c17c71f16926251ba0bc4326caf8bbed0
Author: Josh Elser 
AuthorDate: Fri Sep 10 16:24:13 2021 -0400

HBASE-26273 Force ReadType.STREAM when the user does not explicitly set a 
ReadType on the Scan for a Snapshot-based Job

HBase 2 moved Scans over to PREAD by default, instead of STREAM as in
HBase 1. In the context of a MapReduce job, we can generally expect that
clients using the InputFormat (batch jobs) will read most of the data
for the job. Cater to them, but still give users who want PREAD the
ability to choose it.

Signed-off-by: Duo Zhang 
Signed-off-by: Tak Lon (Stephen) Wu 
---
 .../mapreduce/TableSnapshotInputFormatImpl.java| 18 
 .../mapreduce/TestTableSnapshotInputFormat.java| 33 ++
 2 files changed, 51 insertions(+)

diff --git 
a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormatImpl.java
 
b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormatImpl.java
index 22c19be..c467a3c 100644
--- 
a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormatImpl.java
+++ 
b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormatImpl.java
@@ -41,6 +41,7 @@ import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.client.RegionLocator;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.client.Scan.ReadType;
 import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.regionserver.HRegion;
@@ -129,6 +130,14 @@ public class TableSnapshotInputFormatImpl {
   public static final boolean 
SNAPSHOT_INPUTFORMAT_SCAN_METRICS_ENABLED_DEFAULT = true;
 
   /**
+   * The {@link ReadType} which should be set on the {@link Scan} to read the 
HBase Snapshot,
+   * default STREAM.
+   */
+  public static final String SNAPSHOT_INPUTFORMAT_SCANNER_READTYPE =
+  "hbase.TableSnapshotInputFormat.scanner.readtype";
+  public static final ReadType SNAPSHOT_INPUTFORMAT_SCANNER_READTYPE_DEFAULT = 
ReadType.STREAM;
+
+  /**
* Implementation class for InputSplit logic common between mapred and 
mapreduce.
*/
   public static class InputSplit implements Writable {
@@ -382,6 +391,15 @@ public class TableSnapshotInputFormatImpl {
 } else {
   throw new IllegalArgumentException("Unable to create scan");
 }
+
+if (scan.getReadType() == ReadType.DEFAULT) {
+  LOG.info("Provided Scan has DEFAULT ReadType,"
+  + " updating STREAM for Snapshot-based InputFormat");
+  // Update the "DEFAULT" ReadType to be "STREAM" to try to improve the 
default case.
+  scan.setReadType(conf.getEnum(SNAPSHOT_INPUTFORMAT_SCANNER_READTYPE,
+  SNAPSHOT_INPUTFORMAT_SCANNER_READTYPE_DEFAULT));
+}
+
 return scan;
   }
 
diff --git 
a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableSnapshotInputFormat.java
 
b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableSnapshotInputFormat.java
index 34e6b27..f4e9f7d 100644
--- 
a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableSnapshotInputFormat.java
+++ 
b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableSnapshotInputFormat.java
@@ -22,6 +22,8 @@ import static 
org.apache.hadoop.hbase.mapreduce.TableSnapshotInputFormatImpl.SNA
 import static 
org.apache.hadoop.hbase.mapreduce.TableSnapshotInputFormatImpl.SNAPSHOT_INPUTFORMAT_ROW_LIMIT_PER_INPUTSPLIT;
 import static 
org.apache.hadoop.hbase.mapreduce.TableSnapshotInputFormatImpl.SNAPSHOT_INPUTFORMAT_LOCALITY_BY_REGION_LOCATION;
 import static 
org.apache.hadoop.hbase.mapreduce.TableSnapshotInputFormatImpl.SNAPSHOT_INPUTFORMAT_LOCALITY_BY_REGION_LOCATION_DEFAULT;
+import static 
org.apache.hadoop.hbase.mapreduce.TableSnapshotInputFormatImpl.SNAPSHOT_INPUTFORMAT_SCANNER_READTYPE;
+import static org.junit.Assert.assertEquals;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.when;
 
@@ -39,6 +41,7 @@ import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.client.Scan.ReadType;
 import org.apache.hadoop.hbase.client.Table;
 import org.

[hbase] branch master updated: HBASE-26277: Fix InterfaceAudience for BalanceRequest$Builder and BalanceResponse$Builder

2021-09-10 Thread elserj
This is an automated email from the ASF dual-hosted git repository.

elserj pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/master by this push:
 new ed87e98  HBASE-26277: Fix InterfaceAudience for BalanceRequest$Builder 
and BalanceResponse$Builder
ed87e98 is described below

commit ed87e98d670e4552c2a56fc9b8eab61a165fa80e
Author: Bryan Beaudreault 
AuthorDate: Fri Sep 10 07:49:07 2021 -0400

HBASE-26277: Fix InterfaceAudience for BalanceRequest$Builder and 
BalanceResponse$Builder

Signed-off-by: Duo Zhang 
Signed-off-by: Josh Elser 
---
 .../src/main/java/org/apache/hadoop/hbase/client/BalanceRequest.java  | 2 +-
 .../src/main/java/org/apache/hadoop/hbase/client/BalanceResponse.java | 4 ++--
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BalanceRequest.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BalanceRequest.java
index 03ce5db..4e67bce 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BalanceRequest.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BalanceRequest.java
@@ -30,7 +30,7 @@ public final class BalanceRequest {
   /**
* Builder for constructing a {@link BalanceRequest}
*/
-  @InterfaceAudience.Private
+  @InterfaceAudience.Public
   public final static class Builder {
 private boolean dryRun = false;
 private boolean ignoreRegionsInTransition = false;
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BalanceResponse.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BalanceResponse.java
index 0d8e84b..1438782 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BalanceResponse.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BalanceResponse.java
@@ -27,9 +27,9 @@ import org.apache.yetus.audience.InterfaceAudience;
 public final class BalanceResponse {
 
   /**
-   * Builds a {@link BalanceResponse} for returning results of a balance 
invocation to callers
+   * Used in HMaster to build a {@link BalanceResponse} for returning results 
of a balance invocation to callers
*/
-  @InterfaceAudience.Public
+  @InterfaceAudience.Private
   public final static class Builder {
 private boolean balancerRan;
 private int movesCalculated;


[hbase] branch branch-2 updated: HBASE-26277: Fix InterfaceAudience for BalanceRequest$Builder and BalanceResponse$Builder

2021-09-10 Thread elserj
This is an automated email from the ASF dual-hosted git repository.

elserj pushed a commit to branch branch-2
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2 by this push:
 new 1664f52  HBASE-26277: Fix InterfaceAudience for BalanceRequest$Builder 
and BalanceResponse$Builder
1664f52 is described below

commit 1664f52066f1eadab141fd653ba67f88774cf922
Author: Bryan Beaudreault 
AuthorDate: Fri Sep 10 07:49:07 2021 -0400

HBASE-26277: Fix InterfaceAudience for BalanceRequest$Builder and 
BalanceResponse$Builder

Signed-off-by: Duo Zhang 
Signed-off-by: Josh Elser 
---
 .../src/main/java/org/apache/hadoop/hbase/client/BalanceRequest.java  | 2 +-
 .../src/main/java/org/apache/hadoop/hbase/client/BalanceResponse.java | 4 ++--
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BalanceRequest.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BalanceRequest.java
index 03ce5db..4e67bce 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BalanceRequest.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BalanceRequest.java
@@ -30,7 +30,7 @@ public final class BalanceRequest {
   /**
* Builder for constructing a {@link BalanceRequest}
*/
-  @InterfaceAudience.Private
+  @InterfaceAudience.Public
   public final static class Builder {
 private boolean dryRun = false;
 private boolean ignoreRegionsInTransition = false;
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BalanceResponse.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BalanceResponse.java
index 0d8e84b..1438782 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BalanceResponse.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BalanceResponse.java
@@ -27,9 +27,9 @@ import org.apache.yetus.audience.InterfaceAudience;
 public final class BalanceResponse {
 
   /**
-   * Builds a {@link BalanceResponse} for returning results of a balance 
invocation to callers
+   * Used in HMaster to build a {@link BalanceResponse} for returning results 
of a balance invocation to callers
*/
-  @InterfaceAudience.Public
+  @InterfaceAudience.Private
   public final static class Builder {
 private boolean balancerRan;
 private int movesCalculated;


[hbase] 02/02: HBASE-26147 Add a dry run mode to the balancer, where moves are calculated but not actually executed

2021-09-01 Thread elserj
This is an automated email from the ASF dual-hosted git repository.

elserj pushed a commit to branch branch-2
in repository https://gitbox.apache.org/repos/asf/hbase.git

commit e07928df4cf2ef7564808bc988a289ef336af03a
Author: Bryan Beaudreault 
AuthorDate: Wed Sep 1 21:55:40 2021 -0400

HBASE-26147 Add a dry run mode to the balancer, where moves are calculated 
but not actually executed

Signed-off-by: Duo Zhang 
Signed-off-by: Josh Elser 

(Re-application of HBASE-26147 with the correct author metadata)
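
A minimal usage sketch for the new dry-run API, illustrative only: the
isBalancerRan getter appears in this patch, while setDryRun and
getMovesCalculated are inferred from the Builder fields shown below.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.BalanceRequest;
import org.apache.hadoop.hbase.client.BalanceResponse;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DryRunBalancerExample {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Ask the balancer to calculate moves without executing them.
      BalanceRequest request = BalanceRequest.newBuilder()
          .setDryRun(true)  // inferred builder method; see the dryRun field
          .build();
      BalanceResponse response = admin.balance(request);
      System.out.println("balancer ran: " + response.isBalancerRan()
          + ", moves calculated: " + response.getMovesCalculated());
    }
  }
}

The same request object also drives the new balanceRSGroup(String,
BalanceRequest) overload further down in the patch.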
---
 .../java/org/apache/hadoop/hbase/client/Admin.java |  28 -
 .../org/apache/hadoop/hbase/client/AsyncAdmin.java |  22 +++-
 .../hadoop/hbase/client/AsyncHBaseAdmin.java   |   4 +-
 .../apache/hadoop/hbase/client/BalanceRequest.java | 114 +++
 .../hadoop/hbase/client/BalanceResponse.java   | 126 +
 .../org/apache/hadoop/hbase/client/HBaseAdmin.java |  69 +--
 .../hadoop/hbase/client/RawAsyncHBaseAdmin.java|  14 +--
 .../hadoop/hbase/shaded/protobuf/ProtobufUtil.java |  32 ++
 .../hbase/shaded/protobuf/RequestConverter.java|  10 --
 .../src/main/protobuf/Master.proto |   5 +-
 .../apache/hadoop/hbase/rsgroup/RSGroupAdmin.java  |  14 ++-
 .../hadoop/hbase/rsgroup/RSGroupAdminClient.java   |  12 +-
 .../hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java |  21 +++-
 .../hadoop/hbase/rsgroup/RSGroupAdminServer.java   |  29 +++--
 .../hadoop/hbase/rsgroup/RSGroupProtobufUtil.java  |  35 +-
 hbase-rsgroup/src/main/protobuf/RSGroupAdmin.proto |   4 +
 .../hadoop/hbase/rsgroup/TestRSGroupsBalance.java  |  90 ++-
 .../hadoop/hbase/rsgroup/TestRSGroupsBase.java |   6 +-
 .../hadoop/hbase/rsgroup/TestRSGroupsFallback.java |   4 +-
 .../hbase/rsgroup/VerifyingRSGroupAdminClient.java |   6 +-
 .../hadoop/hbase/coprocessor/MasterObserver.java   |  16 ++-
 .../org/apache/hadoop/hbase/master/HMaster.java|  54 +
 .../hadoop/hbase/master/MasterCoprocessorHost.java |  18 +--
 .../hadoop/hbase/master/MasterRpcServices.java |   3 +-
 .../hbase/security/access/AccessController.java|   3 +-
 .../apache/hadoop/hbase/TestRegionRebalancing.java |  27 -
 .../client/TestAsyncTableGetMultiThreaded.java |   2 +-
 .../hadoop/hbase/client/TestMultiParallel.java |   2 +-
 .../hbase/client/TestSeparateClientZKCluster.java  |   4 +-
 .../hbase/coprocessor/TestMasterObserver.java  |  13 ++-
 .../hbase/master/TestMasterDryRunBalancer.java | 126 +
 .../master/procedure/TestProcedurePriority.java|   3 +-
 .../security/access/TestAccessController.java  |   3 +-
 .../access/TestWithDisabledAuthorization.java  |   3 +-
 hbase-shell/src/main/ruby/hbase/admin.rb   |   7 +-
 hbase-shell/src/main/ruby/hbase/balancer_utils.rb  |  57 ++
 hbase-shell/src/main/ruby/hbase/rsgroup_admin.rb   |   7 +-
 .../main/ruby/shell/commands/balance_rsgroup.rb|  22 +++-
 .../src/main/ruby/shell/commands/balancer.rb   |  33 +++---
 hbase-shell/src/test/ruby/hbase/admin_test.rb  |   8 +-
 .../src/test/ruby/hbase/balancer_utils_test.rb |  78 +
 .../src/test/ruby/shell/rsgroup_shell_test.rb  |   2 +
 .../hadoop/hbase/thrift2/client/ThriftAdmin.java   |   7 ++
 43 files changed, 943 insertions(+), 200 deletions(-)

diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
index fb61612..a3a5107 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
@@ -1251,7 +1251,20 @@ public interface Admin extends Abortable, Closeable {
* @return true if balancer ran, false otherwise.
* @throws IOException if a remote or network exception occurs
*/
-  boolean balance() throws IOException;
+  default boolean balance() throws IOException {
+return balance(BalanceRequest.defaultInstance())
+  .isBalancerRan();
+  }
+
+  /**
+   * Invoke the balancer with the given balance request.  The BalanceRequest 
defines how the
+   * balancer will run. See {@link BalanceRequest} for more details.
+   *
+   * @param request defines how the balancer should run
+   * @return {@link BalanceResponse} with details about the results of the 
invocation.
+   * @throws IOException if a remote or network exception occurs
+   */
+  BalanceResponse balance(BalanceRequest request) throws IOException;
 
   /**
* Invoke the balancer.  Will run the balancer and if regions to move, it 
will
@@ -1262,7 +1275,7 @@ public interface Admin extends Abortable, Closeable {
* @return true if balancer ran, false otherwise.
* @throws IOException if a remote or network exception occurs
* @deprecated Since 2.0.0. Will be removed in 3.0.0.
-   * Use {@link #balance(boolean)} instead.
+   * Use {@link #balance(BalanceRequest)} instead.
*/
   @Deprecated

[hbase] 01/02: Revert "HBASE-26147 Add a dry run mode to the balancer, where moves are calculated but not actually executed"

2021-09-01 Thread elserj
This is an automated email from the ASF dual-hosted git repository.

elserj pushed a commit to branch branch-2
in repository https://gitbox.apache.org/repos/asf/hbase.git

commit 1e74f084035850130a71c7e7b7cad3194883078b
Author: Josh Elser 
AuthorDate: Wed Sep 1 21:55:13 2021 -0400

Revert "HBASE-26147 Add a dry run mode to the balancer, where moves are 
calculated but not actually executed"

This reverts commit 5b58d11c91cbbc2f73e4252088c97df12a49ff87.

I accidentally applied this as myself instead of as Bryan. Reverting it
to apply the patch properly.
---
 .../java/org/apache/hadoop/hbase/client/Admin.java |  28 +
 .../org/apache/hadoop/hbase/client/AsyncAdmin.java |  22 +---
 .../hadoop/hbase/client/AsyncHBaseAdmin.java   |   4 +-
 .../apache/hadoop/hbase/client/BalanceRequest.java | 114 ---
 .../hadoop/hbase/client/BalanceResponse.java   | 126 -
 .../org/apache/hadoop/hbase/client/HBaseAdmin.java |  69 ++-
 .../hadoop/hbase/client/RawAsyncHBaseAdmin.java|  14 +--
 .../hadoop/hbase/shaded/protobuf/ProtobufUtil.java |  32 --
 .../hbase/shaded/protobuf/RequestConverter.java|  10 ++
 .../src/main/protobuf/Master.proto |   5 +-
 .../apache/hadoop/hbase/rsgroup/RSGroupAdmin.java  |  14 +--
 .../hadoop/hbase/rsgroup/RSGroupAdminClient.java   |  12 +-
 .../hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java |  21 +---
 .../hadoop/hbase/rsgroup/RSGroupAdminServer.java   |  29 ++---
 .../hadoop/hbase/rsgroup/RSGroupProtobufUtil.java  |  35 +-
 hbase-rsgroup/src/main/protobuf/RSGroupAdmin.proto |   4 -
 .../hadoop/hbase/rsgroup/TestRSGroupsBalance.java  |  90 +--
 .../hadoop/hbase/rsgroup/TestRSGroupsBase.java |   6 +-
 .../hadoop/hbase/rsgroup/TestRSGroupsFallback.java |   4 +-
 .../hbase/rsgroup/VerifyingRSGroupAdminClient.java |   6 +-
 .../hadoop/hbase/coprocessor/MasterObserver.java   |  16 +--
 .../org/apache/hadoop/hbase/master/HMaster.java|  54 -
 .../hadoop/hbase/master/MasterCoprocessorHost.java |  18 ++-
 .../hadoop/hbase/master/MasterRpcServices.java |   3 +-
 .../hbase/security/access/AccessController.java|   3 +-
 .../apache/hadoop/hbase/TestRegionRebalancing.java |  27 +
 .../client/TestAsyncTableGetMultiThreaded.java |   2 +-
 .../hadoop/hbase/client/TestMultiParallel.java |   2 +-
 .../hbase/client/TestSeparateClientZKCluster.java  |   4 +-
 .../hbase/coprocessor/TestMasterObserver.java  |  13 +--
 .../hbase/master/TestMasterDryRunBalancer.java | 126 -
 .../master/procedure/TestProcedurePriority.java|   3 +-
 .../security/access/TestAccessController.java  |   3 +-
 .../access/TestWithDisabledAuthorization.java  |   3 +-
 hbase-shell/src/main/ruby/hbase/admin.rb   |   7 +-
 hbase-shell/src/main/ruby/hbase/balancer_utils.rb  |  57 --
 hbase-shell/src/main/ruby/hbase/rsgroup_admin.rb   |   7 +-
 .../main/ruby/shell/commands/balance_rsgroup.rb|  22 +---
 .../src/main/ruby/shell/commands/balancer.rb   |  33 +++---
 hbase-shell/src/test/ruby/hbase/admin_test.rb  |   8 +-
 .../src/test/ruby/hbase/balancer_utils_test.rb |  78 -
 .../src/test/ruby/shell/rsgroup_shell_test.rb  |   2 -
 .../hadoop/hbase/thrift2/client/ThriftAdmin.java   |   7 --
 43 files changed, 200 insertions(+), 943 deletions(-)

diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
index a3a5107..fb61612 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
@@ -1251,20 +1251,7 @@ public interface Admin extends Abortable, Closeable {
* @return true if balancer ran, false otherwise.
* @throws IOException if a remote or network exception occurs
*/
-  default boolean balance() throws IOException {
-return balance(BalanceRequest.defaultInstance())
-  .isBalancerRan();
-  }
-
-  /**
-   * Invoke the balancer with the given balance request.  The BalanceRequest 
defines how the
-   * balancer will run. See {@link BalanceRequest} for more details.
-   *
-   * @param request defines how the balancer should run
-   * @return {@link BalanceResponse} with details about the results of the 
invocation.
-   * @throws IOException if a remote or network exception occurs
-   */
-  BalanceResponse balance(BalanceRequest request) throws IOException;
+  boolean balance() throws IOException;
 
   /**
* Invoke the balancer.  Will run the balancer and if regions to move, it 
will
@@ -1275,7 +1262,7 @@ public interface Admin extends Abortable, Closeable {
* @return true if balancer ran, false otherwise.
* @throws IOException if a remote or network exception occurs
* @deprecated Since 2.0.0. Will be removed in 3.0.0.
-   * Use {@link #balance(BalanceRequest)} instead.
+   * 

[hbase] branch branch-2 updated (5b58d11 -> e07928d)

2021-09-01 Thread elserj
This is an automated email from the ASF dual-hosted git repository.

elserj pushed a change to branch branch-2
in repository https://gitbox.apache.org/repos/asf/hbase.git.


from 5b58d11  HBASE-26147 Add a dry run mode to the balancer, where moves 
are calculated but not actually executed
 new 1e74f08  Revert "HBASE-26147 Add a dry run mode to the balancer, where 
moves are calculated but not actually executed"
 new e07928d  HBASE-26147 Add a dry run mode to the balancer, where moves 
are calculated but not actually executed

The 2 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.


Summary of changes:


[hbase] branch master updated: HBASE-26147 Add a dry run mode to the balancer, where moves are calculated but not actually executed

2021-09-01 Thread elserj
This is an automated email from the ASF dual-hosted git repository.

elserj pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/master by this push:
 new a15e94a  HBASE-26147 Add a dry run mode to the balancer, where moves 
are calculated but not actually executed
a15e94a is described below

commit a15e94a47f06ae665a0c213d092761431e907eac
Author: Bryan Beaudreault 
AuthorDate: Tue Jul 27 17:25:59 2021 -0400

HBASE-26147 Add a dry run mode to the balancer, where moves are calculated 
but not actually executed

Closes #3630

Signed-off-by: Duo Zhang 
Signed-off-by: Josh Elser 

* @return true if balancer ran, false otherwise.
* @throws IOException if a remote or network exception occurs
*/
-  boolean balance() throws IOException;
+  default boolean balance() throws IOException {
+return balance(BalanceRequest.defaultInstance())
+  .isBalancerRan();
+  }
+
+  /**
+   * Invoke the balancer with the given balance request.  The BalanceRequest 
defines how the
+   * balancer will run. See {@link BalanceRequest} for more details.
+   *
+   * @param request defines how the balancer should run
+   * @return {@link BalanceResponse} with details about the results of the 
invocation.
+   * @throws IOException if a remote or network exception occurs
+   */
+  BalanceResponse balance(BalanceRequest request) throws IOException;
 
   /**
* Invoke the balancer.  Will run the balancer and if regions to move, it 
will
@@ -841,8 +854,17 @@ public interface Admin extends Abortable, Closeable {
* @param force whether we should force balance even if there is region in 
transition
* @return true if balancer ran, false otherwise.
* @throws IOException if a remote or network exception occurs
+   * @deprecated Since 2.5.0. Will be removed in 4.0.0.
+   * Use {@link #balance(BalanceRequest)} instead.
*/
-  boolean balance(boolean force) throws IOException;
+  @Deprecated
+  default boolean balance(boolean force) throws IOException {
+return balance(
+  BalanceRequest.newBuilder()
+  .setIgnoreRegionsInTransition(force)
+  .build()
+).isBalancerRan();
+  }
 
   /**
* Query the current state of the balancer.
@@ -2494,10 +2516,20 @@ public interface Admin extends Abortable, Closeable {
   /**
* Balance regions in the given RegionServer group
* @param groupName the group name
-   * @return boolean Whether balance ran or not
+   * @return BalanceResponse details about the balancer run
* @throws IOException if a remote or network exception occurs
*/
-  boolean balanceRSGroup(String groupName) throws IOException;
+  default BalanceResponse balanceRSGroup(String groupName) throws IOException {
+return balanceRSGroup(groupName, BalanceRequest.defaultInstance());
+  }
+
+  /**
+   * Balance regions in the given RegionServer group, running based on
+   * the given {@link BalanceRequest}.
+   *
+   * @return BalanceResponse details about the balancer run
+   */
+  BalanceResponse balanceRSGroup(String groupName, BalanceRequest request) 
throws IOException;
 
   /**
* Rename rsgroup
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AdminOverAsyncAdmin.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AdminOverAsyncAdmin.java
index 4a62f02..08de979 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AdminOverAsyncAdmin.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AdminOverAsyncAdmin.java
@@ -374,6 +374,11 @@ class AdminOverAsyncAdmin implements Admin {
 return get(admin.balancerSwitch(onOrOff, synchronous));
   }
 
+
+  public BalanceResponse balance(BalanceRequest request) throws IOException {
+return get(admin.balance(request));
+  }
+
   @Override
   public boolean balance() throws IOException {
 return get(admin.balance());
@@ -1006,8 +1011,8 @@ class AdminOverAsyncAdmin implements Admin {
   }
 
   @Override
-  public boolean balanceRSGroup(String groupName) throws IOException {
-return get(admin.balanceRSGroup(groupName));
+  public BalanceResponse balanceRSGroup(String groupName, BalanceRequest 
request) throws IOException {
+return get(admin.balanceRSGroup(groupName, request));
   }
 
   @Override
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java
index a5b1510..c366d3e 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java
@@ -1287,7 +1287,8 @@ public interface AsyncAdmin {
* {@link CompletableFuture}.
*/
   default CompletableFuture<Boolean> balance() {
-return balance(false);
+return balance(BalanceRequest.defaultInstance())
+  .thenApply(BalanceResponse::isBalancerRan);
   }
 
   /**
@@ -1297,8 +1298,25

[hbase] branch branch-2 updated: HBASE-26147 Add a dry run mode to the balancer, where moves are calculated but not actually executed

2021-09-01 Thread elserj
This is an automated email from the ASF dual-hosted git repository.

elserj pushed a commit to branch branch-2
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2 by this push:
 new 5b58d11  HBASE-26147 Add a dry run mode to the balancer, where moves 
are calculated but not actually executed
5b58d11 is described below

commit 5b58d11c91cbbc2f73e4252088c97df12a49ff87
Author: Josh Elser 
AuthorDate: Wed Sep 1 21:47:53 2021 -0400

HBASE-26147 Add a dry run mode to the balancer, where moves are calculated 
but not actually executed

Signed-off-by: Duo Zhang 
Signed-off-by: Josh Elser 

* @return true if balancer ran, false otherwise.
* @throws IOException if a remote or network exception occurs
*/
-  boolean balance() throws IOException;
+  default boolean balance() throws IOException {
+return balance(BalanceRequest.defaultInstance())
+  .isBalancerRan();
+  }
+
+  /**
+   * Invoke the balancer with the given balance request.  The BalanceRequest 
defines how the
+   * balancer will run. See {@link BalanceRequest} for more details.
+   *
+   * @param request defines how the balancer should run
+   * @return {@link BalanceResponse} with details about the results of the 
invocation.
+   * @throws IOException if a remote or network exception occurs
+   */
+  BalanceResponse balance(BalanceRequest request) throws IOException;
 
   /**
* Invoke the balancer.  Will run the balancer and if regions to move, it 
will
@@ -1262,7 +1275,7 @@ public interface Admin extends Abortable, Closeable {
* @return true if balancer ran, false otherwise.
* @throws IOException if a remote or network exception occurs
* @deprecated Since 2.0.0. Will be removed in 3.0.0.
-   * Use {@link #balance(boolean)} instead.
+   * Use {@link #balance(BalanceRequest)} instead.
*/
   @Deprecated
   default boolean balancer(boolean force) throws IOException {
@@ -1277,8 +1290,17 @@ public interface Admin extends Abortable, Closeable {
* @param force whether we should force balance even if there is region in 
transition
* @return true if balancer ran, false otherwise.
* @throws IOException if a remote or network exception occurs
+   * @deprecated Since 2.5.0. Will be removed in 4.0.0.
+   * Use {@link #balance(BalanceRequest)} instead.
*/
-  boolean balance(boolean force) throws IOException;
+  @Deprecated
+  default boolean balance(boolean force) throws IOException {
+return balance(
+  BalanceRequest.newBuilder()
+  .setIgnoreRegionsInTransition(force)
+  .build()
+).isBalancerRan();
+  }
 
   /**
* Query the current state of the balancer.
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java
index 6b8fda7..85d5455 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java
@@ -1257,7 +1257,8 @@ public interface AsyncAdmin {
* {@link CompletableFuture}.
*/
   default CompletableFuture<Boolean> balance() {
-return balance(false);
+return balance(BalanceRequest.defaultInstance())
+  .thenApply(BalanceResponse::isBalancerRan);
   }
 
   /**
@@ -1267,8 +1268,25 @@ public interface AsyncAdmin {
* @param forcible whether we should force balance even if there is region 
in transition.
* @return True if balancer ran, false otherwise. The return value will be 
wrapped by a
* {@link CompletableFuture}.
+   * @deprecated Since 2.5.0. Will be removed in 4.0.0.
+   *  Use {@link #balance(BalanceRequest)} instead.
+   */
+  default CompletableFuture<Boolean> balance(boolean forcible) {
+return balance(
+  BalanceRequest.newBuilder()
+.setIgnoreRegionsInTransition(forcible)
+.build()
+).thenApply(BalanceResponse::isBalancerRan);
+  }
+
+  /**
+   * Invoke the balancer with the given balance request.  The BalanceRequest 
defines how the
+   * balancer will run. See {@link BalanceRequest} for more details.
+   *
+   * @param request defines how the balancer should run
+   * @return {@link BalanceResponse} with details about the results of the 
invocation.
*/
-  CompletableFuture<Boolean> balance(boolean forcible);
+  CompletableFuture<BalanceResponse> balance(BalanceRequest request);
 
   /**
* Query the current state of the balancer.
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.java
index c7b9897..db720f3 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.java
@@ -684,8 +684,8 @@ class AsyncHBaseAdmin implements AsyncAdmin {
   }
 
   @Override
-  public CompletableFuture<Boolean> balance(boolean forcible) {
-return wrap
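
A hedged sketch of the asynchronous counterpart introduced in the same patch;
the AsyncAdmin instance is assumed to exist, and getMovesCalculated is
inferred from the BalanceResponse fields rather than shown in the diff:

import java.util.concurrent.CompletableFuture;
import org.apache.hadoop.hbase.client.AsyncAdmin;
import org.apache.hadoop.hbase.client.BalanceRequest;

public class AsyncDryRunBalanceExample {
  // Kick off a dry-run balance and log the calculated moves on completion.
  static CompletableFuture<Void> dryRunBalance(AsyncAdmin admin) {
    return admin.balance(BalanceRequest.newBuilder().setDryRun(true).build())
        .thenAccept(resp -> System.out.println(
            // getMovesCalculated() inferred from the response fields
            "moves calculated: " + resp.getMovesCalculated()));
  }
}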

[hbase] branch branch-2 updated: HBASE-26212 Expose configuration to enable/disable AuthUtil (#3619)

2021-08-23 Thread elserj
This is an automated email from the ASF dual-hosted git repository.

elserj pushed a commit to branch branch-2
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2 by this push:
 new 2728821  HBASE-26212 Expose configuration to enable/disable AuthUtil 
(#3619)
2728821 is described below

commit 2728821e62ec882619642b9ea0fa5913c3167ec9
Author: Josh Elser 
AuthorDate: Mon Aug 23 20:23:52 2021 -0400

HBASE-26212 Expose configuration to enable/disable AuthUtil (#3619)

In some situations, a caller may know that it is properly managing the
Kerberos ticket to talk to HBase. In these situations, it's possible
that AuthUtil still tries to do renewals, but just fails repeatedly to
do so. Give a configuration flag for such clients to be able to tell
AuthUtil to simply stop trying.

Signed-off-by: Duo Zhang 
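
A minimal sketch of a client opting out; the key and default come from the
patch below, everything else is illustrative:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class DisableAuthRenewalExample {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Tell AuthUtil not to schedule the keytab renewal chore; with this set,
    // getAuthRenewalChore(user, conf) returns null and nothing is scheduled.
    conf.setBoolean("hbase.client.keytab.automatic.renewal", false);
  }
}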
---
 .../hadoop/hbase/client/AsyncConnectionImpl.java   |  2 +-
 .../hbase/client/ConnectionImplementation.java |  2 +-
 .../java/org/apache/hadoop/hbase/AuthUtil.java | 22 +++---
 3 files changed, 21 insertions(+), 5 deletions(-)

diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java
index 2653361..2ed7399 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java
@@ -171,7 +171,7 @@ class AsyncConnectionImpl implements AsyncConnection {
 
   private void spawnRenewalChore(final UserGroupInformation user) {
 ChoreService service = getChoreService();
-service.scheduleChore(AuthUtil.getAuthRenewalChore(user));
+service.scheduleChore(AuthUtil.getAuthRenewalChore(user, conf));
   }
 
   /**
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
index 936f7f5..33f92ed 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
@@ -374,7 +374,7 @@ class ConnectionImplementation implements 
ClusterConnection, Closeable {
 
   private void spawnRenewalChore(final UserGroupInformation user) {
 ChoreService service = getChoreService();
-service.scheduleChore(AuthUtil.getAuthRenewalChore(user));
+service.scheduleChore(AuthUtil.getAuthRenewalChore(user, conf));
   }
 
   /**
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/AuthUtil.java 
b/hbase-common/src/main/java/org/apache/hadoop/hbase/AuthUtil.java
index d8d4f78..95dfdd2 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/AuthUtil.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/AuthUtil.java
@@ -90,6 +90,10 @@ public final class AuthUtil {
   /** Client principal */
   public static final String HBASE_CLIENT_KERBEROS_PRINCIPAL = 
"hbase.client.keytab.principal";
 
+  /** Configuration to automatically try to renew keytab-based logins */
+  public static final String HBASE_CLIENT_AUTOMATIC_KEYTAB_RENEWAL_KEY = 
"hbase.client.keytab.automatic.renewal";
+  public static final boolean HBASE_CLIENT_AUTOMATIC_KEYTAB_RENEWAL_DEFAULT = 
true;
+
   private AuthUtil() {
 super();
   }
@@ -189,8 +193,8 @@ public final class AuthUtil {
* @return a ScheduledChore for renewals.
*/
   @InterfaceAudience.Private
-  public static ScheduledChore getAuthRenewalChore(final UserGroupInformation 
user) {
-if (!user.hasKerberosCredentials()) {
+  public static ScheduledChore getAuthRenewalChore(final UserGroupInformation 
user, Configuration conf) {
+if (!user.hasKerberosCredentials() || !isAuthRenewalChoreEnabled(conf)) {
   return null;
 }
 
@@ -221,8 +225,11 @@ public final class AuthUtil {
*/
   @Deprecated
   public static ScheduledChore getAuthChore(Configuration conf) throws 
IOException {
+if (!isAuthRenewalChoreEnabled(conf)) {
+  return null;
+}
 User user = loginClientAsService(conf);
-return getAuthRenewalChore(user.getUGI());
+return getAuthRenewalChore(user.getUGI(), conf);
   }
 
   private static Stoppable createDummyStoppable() {
@@ -271,4 +278,13 @@ public final class AuthUtil {
   public static String toGroupEntry(String name) {
 return GROUP_PREFIX + name;
   }
+
+  /**
+   * Returns true if the chore to automatically renew Kerberos tickets (from
+   * keytabs) should be started. The default is true.
+   */
+  static boolean isAuthRenewalChoreEnabled(Configuration conf) {
+return conf.getBoolean(HBASE_CLIENT_AUTOMATIC_KEYTAB_RENEWAL_KEY,
+HBASE_CLIENT_AUTOMATIC_KEYTAB_RENEWAL_DEFAULT);
+  }
 }


[hbase] branch master updated: HBASE-26212 Expose configuration to enable/disable AuthUtil

2021-08-21 Thread elserj
This is an automated email from the ASF dual-hosted git repository.

elserj pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/master by this push:
 new 6b5bd75  HBASE-26212 Expose configuration to enable/disable AuthUtil
6b5bd75 is described below

commit 6b5bd75e464cad550854a144d3b989b97961e95b
Author: Josh Elser 
AuthorDate: Sat Aug 21 15:57:06 2021 -0400

HBASE-26212 Expose configuration to enable/disable AuthUtil

In some situations, a caller may know that it is properly managing the
Kerberos ticket to talk to HBase. In these situations, it's possible
that AuthUtil still tries to do renewals, but just fails repeatedly to
do so. Give a configuration flag for such clients to be able to tell
AuthUtil to simply stop trying.

Signed-off-by: Duo Zhang 

Closes #3609
---
 .../hadoop/hbase/client/AsyncConnectionImpl.java   |  2 +-
 .../java/org/apache/hadoop/hbase/AuthUtil.java | 22 +++---
 2 files changed, 20 insertions(+), 4 deletions(-)

diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java
index 25a98ed..2ac59fc 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java
@@ -180,7 +180,7 @@ class AsyncConnectionImpl implements AsyncConnection {
 
   private void spawnRenewalChore(final UserGroupInformation user) {
 ChoreService service = getChoreService();
-service.scheduleChore(AuthUtil.getAuthRenewalChore(user));
+service.scheduleChore(AuthUtil.getAuthRenewalChore(user, conf));
   }
 
   /**
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/AuthUtil.java 
b/hbase-common/src/main/java/org/apache/hadoop/hbase/AuthUtil.java
index d8d4f78..95dfdd2 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/AuthUtil.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/AuthUtil.java
@@ -90,6 +90,10 @@ public final class AuthUtil {
   /** Client principal */
   public static final String HBASE_CLIENT_KERBEROS_PRINCIPAL = 
"hbase.client.keytab.principal";
 
+  /** Configuration to automatically try to renew keytab-based logins */
+  public static final String HBASE_CLIENT_AUTOMATIC_KEYTAB_RENEWAL_KEY = 
"hbase.client.keytab.automatic.renewal";
+  public static final boolean HBASE_CLIENT_AUTOMATIC_KEYTAB_RENEWAL_DEFAULT = 
true;
+
   private AuthUtil() {
 super();
   }
@@ -189,8 +193,8 @@ public final class AuthUtil {
* @return a ScheduledChore for renewals.
*/
   @InterfaceAudience.Private
-  public static ScheduledChore getAuthRenewalChore(final UserGroupInformation 
user) {
-if (!user.hasKerberosCredentials()) {
+  public static ScheduledChore getAuthRenewalChore(final UserGroupInformation 
user, Configuration conf) {
+if (!user.hasKerberosCredentials() || !isAuthRenewalChoreEnabled(conf)) {
   return null;
 }
 
@@ -221,8 +225,11 @@ public final class AuthUtil {
*/
   @Deprecated
   public static ScheduledChore getAuthChore(Configuration conf) throws 
IOException {
+if (!isAuthRenewalChoreEnabled(conf)) {
+  return null;
+}
 User user = loginClientAsService(conf);
-return getAuthRenewalChore(user.getUGI());
+return getAuthRenewalChore(user.getUGI(), conf);
   }
 
   private static Stoppable createDummyStoppable() {
@@ -271,4 +278,13 @@ public final class AuthUtil {
   public static String toGroupEntry(String name) {
 return GROUP_PREFIX + name;
   }
+
+  /**
+   * Returns true if the chore to automatically renew Kerberos tickets (from
+   * keytabs) should be started. The default is true.
+   */
+  static boolean isAuthRenewalChoreEnabled(Configuration conf) {
+return conf.getBoolean(HBASE_CLIENT_AUTOMATIC_KEYTAB_RENEWAL_KEY,
+HBASE_CLIENT_AUTOMATIC_KEYTAB_RENEWAL_DEFAULT);
+  }
 }


[hbase] branch branch-1 updated: HBASE-25712 Backport of HBASE-25692 to branch-1

2021-04-02 Thread elserj
This is an automated email from the ASF dual-hosted git repository.

elserj pushed a commit to branch branch-1
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-1 by this push:
 new 7851df7  HBASE-25712 Backport of HBASE-25692 to branch-1
7851df7 is described below

commit 7851df7927f26f86f7aa9271496a0ac653c5b91e
Author: Josh Elser 
AuthorDate: Mon Mar 29 20:28:25 2021 -0400

HBASE-25712 Backport of HBASE-25692 to branch-1

HBASE-25692 ensures that we do not leak any InputStream (Socket)
which would otherwise remain as CLOSE_WAIT until the java process
exits. These orphaned sockets would eventually saturate Linux network
and file limits.

Closes #3104

Signed-off-by: Wellington Chevreuil 
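
The heart of the fix is closing the stream on any exception, not just
IOException. A self-contained sketch of the pattern, with illustrative names
rather than the patched WALFactory code:

import java.io.IOException;
import java.io.InputStream;

public final class CloseOnAnyException {

  @FunctionalInterface
  public interface Initializer<T> {
    T init(InputStream stream) throws Exception;
  }

  // Close the stream whenever init fails, whatever the exception type,
  // so the socket behind it cannot linger in CLOSE_WAIT.
  public static <T> T initOrClose(InputStream stream, Initializer<T> init)
      throws Exception {
    try {
      return init.init(stream);
    } catch (Exception e) {
      try {
        stream.close();
      } catch (IOException suppressed) {
        e.addSuppressed(suppressed);
      }
      throw e;
    }
  }

  private CloseOnAnyException() {
  }
}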
---
 .../org/apache/hadoop/hbase/wal/WALFactory.java|  56 +
 .../apache/hadoop/hbase/wal/FileSystemProxy.java   | 106 
 .../apache/hadoop/hbase/wal/TestWALFactory.java| 137 +
 3 files changed, 274 insertions(+), 25 deletions(-)

diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALFactory.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALFactory.java
index 8a82bd0..47d3c54 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALFactory.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALFactory.java
@@ -311,7 +311,7 @@ public class WALFactory {
 reader.init(fs, path, conf, stream);
 return reader;
   }
-} catch (IOException e) {
+} catch (Exception e) {
   if (stream != null) {
 try {
   stream.close();
@@ -328,33 +328,39 @@ public class WALFactory {
   LOG.debug("exception details", exception);
 }
   }
-  String msg = e.getMessage();
-  if (msg != null && (msg.contains("Cannot obtain block length")
-  || msg.contains("Could not obtain the last block")
-  || msg.matches("Blocklist for [^ ]* has changed.*"))) {
-if (++nbAttempt == 1) {
-  LOG.warn("Lease should have recovered. This is not expected. 
Will retry", e);
-}
-if (reporter != null && !reporter.progress()) {
-  throw new InterruptedIOException("Operation is cancelled");
-}
-if (nbAttempt > 2 && openTimeout < 
EnvironmentEdgeManager.currentTime()) {
-  LOG.error("Can't open after " + nbAttempt + " attempts and "
-  + (EnvironmentEdgeManager.currentTime() - startWaiting) + 
"ms " + " for " + path);
-} else {
-  try {
-Thread.sleep(nbAttempt < 3 ? 500 : 1000);
-continue; // retry
-  } catch (InterruptedException ie) {
-InterruptedIOException iioe = new InterruptedIOException();
-iioe.initCause(ie);
-throw iioe;
+  if (e instanceof IOException) {
+String msg = e.getMessage();
+if (msg != null && (msg.contains("Cannot obtain block length")
+|| msg.contains("Could not obtain the last block")
+|| msg.matches("Blocklist for [^ ]* has changed.*"))) {
+  if (++nbAttempt == 1) {
+LOG.warn("Lease should have recovered. This is not expected. 
Will retry", e);
+  }
+  if (reporter != null && !reporter.progress()) {
+throw new InterruptedIOException("Operation is cancelled");
   }
+  if (nbAttempt > 2 && openTimeout < 
EnvironmentEdgeManager.currentTime()) {
+LOG.error("Can't open after " + nbAttempt + " attempts and "
++ (EnvironmentEdgeManager.currentTime() - startWaiting)
++ "ms " + " for " + path);
+  } else {
+try {
+  Thread.sleep(nbAttempt < 3 ? 500 : 1000);
+  continue; // retry
+} catch (InterruptedException ie) {
+  InterruptedIOException iioe = new InterruptedIOException();
+  iioe.initCause(ie);
+  throw iioe;
+}
+  }
+  throw new LeaseNotRecoveredException(e);
+} else {
+  throw e;
 }
-throw new LeaseNotRecoveredException(e);
-  } else {
-throw e;
   }
+
+  // Rethrow the original exception if we are not retrying due to 
HDFS-isms.
+  throw e;
 }
   }
 } catch (IOException ie) {
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hb

[hbase] branch branch-2.2 updated: HBASE-25692 Always try to close the WAL reader when we catch any exception (#3090)

2021-03-29 Thread elserj
This is an automated email from the ASF dual-hosted git repository.

elserj pushed a commit to branch branch-2.2
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2.2 by this push:
 new a227a4a  HBASE-25692 Always try to close the WAL reader when we catch 
any exception (#3090)
a227a4a is described below

commit a227a4aa371b2a591c4bf9f92a27d2cdcf749eae
Author: Josh Elser 
AuthorDate: Mon Mar 29 15:15:58 2021 -0400

HBASE-25692 Always try to close the WAL reader when we catch any exception 
(#3090)

There are code paths in which we throw non-IOExceptions when
initializing a WAL reader. However, we only close the InputStream to the
WAL filesystem when the exception is an IOException. Close it if it is
open in all cases.

Co-authored-by: Josh Elser 
Signed-off-by: Andrew Purtell 
---
 .../org/apache/hadoop/hbase/wal/WALFactory.java|  58 -
 .../apache/hadoop/hbase/wal/FileSystemProxy.java   | 105 +
 .../apache/hadoop/hbase/wal/TestWALFactory.java| 129 +
 3 files changed, 266 insertions(+), 26 deletions(-)

diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALFactory.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALFactory.java
index dab65f3..333f24d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALFactory.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALFactory.java
@@ -320,7 +320,9 @@ public class WALFactory {
       reader = lrClass.getDeclaredConstructor().newInstance();
       reader.init(fs, path, conf, null);
       return reader;
-    } catch (IOException e) {
+    } catch (Exception e) {
+      // catch Exception so that we close reader for all exceptions. If we don't
+      // close the reader, we leak a socket.
       if (reader != null) {
         try {
           reader.close();
@@ -330,34 +332,38 @@
         }
       }
 
-      String msg = e.getMessage();
-      if (msg != null
-          && (msg.contains("Cannot obtain block length")
-              || msg.contains("Could not obtain the last block") || msg
-                .matches("Blocklist for [^ ]* has changed.*"))) {
-        if (++nbAttempt == 1) {
-          LOG.warn("Lease should have recovered. This is not expected. Will retry", e);
-        }
-        if (reporter != null && !reporter.progress()) {
-          throw new InterruptedIOException("Operation is cancelled");
-        }
-        if (nbAttempt > 2 && openTimeout < EnvironmentEdgeManager.currentTime()) {
-          LOG.error("Can't open after " + nbAttempt + " attempts and "
-              + (EnvironmentEdgeManager.currentTime() - startWaiting) + "ms " + " for " + path);
-        } else {
-          try {
-            Thread.sleep(nbAttempt < 3 ? 500 : 1000);
-            continue; // retry
-          } catch (InterruptedException ie) {
-            InterruptedIOException iioe = new InterruptedIOException();
-            iioe.initCause(ie);
-            throw iioe;
+      // Only inspect the Exception to consider retry when it's an IOException
+      if (e instanceof IOException) {
+        String msg = e.getMessage();
+        if (msg != null
+            && (msg.contains("Cannot obtain block length")
+                || msg.contains("Could not obtain the last block") || msg
+                  .matches("Blocklist for [^ ]* has changed.*"))) {
+          if (++nbAttempt == 1) {
+            LOG.warn("Lease should have recovered. This is not expected. Will retry", e);
+          }
+          if (reporter != null && !reporter.progress()) {
+            throw new InterruptedIOException("Operation is cancelled");
           }
+          if (nbAttempt > 2 && openTimeout < EnvironmentEdgeManager.currentTime()) {
+            LOG.error("Can't open after " + nbAttempt + " attempts and "
+                + (EnvironmentEdgeManager.currentTime() - startWaiting) + "ms " + " for " + path);
+          } else {
+            try {
+              Thread.sleep(nbAttempt < 3 ? 500 : 1000);
+              continue; // retry
+            } catch (InterruptedException ie) {
+              InterruptedIOException iioe = new InterruptedIOException();
+              iioe.initCause(ie);
+              throw iioe;
+            }
+          }
+          throw new LeaseNotRecoveredException(e);
         }
-        throw new LeaseNotRecoveredException(
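
Note that the retry semantics are unchanged for recoverable lease errors:
the code still sleeps 500ms for the first two attempts and 1s thereafter
(Thread.sleep(nbAttempt < 3 ? 500 : 1000)) and still gives up with
LeaseNotRecoveredException; the patch only narrows that handling to
IOExceptions and guarantees the reader is closed first.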

[hbase] branch branch-2.3 updated: HBASE-25601 Use ASF-official mailing list archives

2021-02-25 Thread elserj
This is an automated email from the ASF dual-hosted git repository.

elserj pushed a commit to branch branch-2.3
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2.3 by this push:
 new 2333f8a  HBASE-25601 Use ASF-official mailing list archives
2333f8a is described below

commit 2333f8a73dbd0fbb04363f9e2d6eb53fb48d80fa
Author: Josh Elser 
AuthorDate: Wed Feb 24 11:15:30 2021 -0500

HBASE-25601 Use ASF-official mailing list archives

Signed-off-by: Peter Somogyi 
Signed-off-by: Duo Zhang 

Closes #2983
---
 RELEASENOTES.md  |  2 +-
 pom.xml  |  2 --
 src/main/asciidoc/_chapters/community.adoc   |  8 
 src/main/asciidoc/_chapters/compression.adoc |  4 +---
 src/main/asciidoc/_chapters/configuration.adoc   |  2 +-
 src/main/asciidoc/_chapters/developer.adoc   |  8 
 src/main/asciidoc/_chapters/ops_mgt.adoc |  1 -
 src/main/asciidoc/_chapters/performance.adoc |  3 +--
 src/main/asciidoc/_chapters/schema_design.adoc   |  4 ++--
 src/main/asciidoc/_chapters/troubleshooting.adoc | 16 +++-
 10 files changed, 17 insertions(+), 33 deletions(-)

diff --git a/RELEASENOTES.md b/RELEASENOTES.md
index 6f8a457..74d7fb4 100644
--- a/RELEASENOTES.md
+++ b/RELEASENOTES.md
@@ -10893,7 +10893,7 @@ MVCC methods cleaned up. Make a bit more sense now. 
Less of them.
 
 Simplifies our update of MemStore/WAL. Now we update memstore AFTER we add to 
WAL (but before we sync). This fixes possible dataloss when two edits came in 
with same coordinates; we could order the edits in memstore differently to how 
they arrived in the WAL.
 
-Marked as an incompatible change because it breaks Distributed Log Replay, a 
feature we'd determined already was unreliable and to be removed (See 
http://search-hadoop.com/m/YGbbhTJpoal8GD1).
+Marked as an incompatible change because it breaks Distributed Log Replay, a 
feature we'd determined already was unreliable and to be removed.
 
 
 ---
diff --git a/pom.xml b/pom.xml
index d96a8c2..9afb1a2 100755
--- a/pom.xml
+++ b/pom.xml
@@ -111,7 +111,6 @@
   http://mail-archives.apache.org/mod_mbox/hbase-user/
   
 
https://dir.gmane.org/gmane.comp.java.hadoop.hbase.user
-
https://search-hadoop.com/?q=fc_project=HBase
   
 
 
@@ -122,7 +121,6 @@
   http://mail-archives.apache.org/mod_mbox/hbase-dev/
   
 
https://dir.gmane.org/gmane.comp.java.hadoop.hbase.devel
-
https://search-hadoop.com/?q=fc_project=HBase
   
 
 
diff --git a/src/main/asciidoc/_chapters/community.adoc 
b/src/main/asciidoc/_chapters/community.adoc
index ffe209e..91a596d 100644
--- a/src/main/asciidoc/_chapters/community.adoc
+++ b/src/main/asciidoc/_chapters/community.adoc
@@ -37,13 +37,13 @@ Just request the name of your branch be added to JIRA up on 
the developer's mail
 Thereafter you can file issues against your feature branch in Apache HBase 
JIRA.
 Your code you keep elsewhere -- it should be public so it can be observed -- 
and you can update dev mailing list on progress.
 When the feature is ready for commit, 3 +1s from committers will get your 
feature merged.
-See link:http://search-hadoop.com/m/asM982C5FkS1[HBase, mail # dev - Thoughts
+See 
link:https://lists.apache.org/thread.html/200513c7e7e4df23c8b9134009d61205c79314e77f222d396006%401346870308%40%3Cdev.hbase.apache.org%3E[HBase,
 mail # dev - Thoughts
   about large feature dev branches]
 
 [[hbase.fix.version.in.jira]]
 .How to set fix version in JIRA on issue resolve
 
-Here is how link:http://search-hadoop.com/m/azemIi5RCJ1[we agreed] to set 
versions in JIRA when we resolve an issue.
+Here is how we agreed to set versions in JIRA when we resolve an issue.
 If master is going to be 2.0.0, and branch-1 1.4.0 then:
 
 * Commit only to master: Mark with 2.0.0
@@ -54,7 +54,7 @@ If master is going to be 2.0.0, and branch-1 1.4.0 then:
 [[hbase.when.to.close.jira]]
 .Policy on when to set a RESOLVED JIRA as CLOSED
 
-We link:http://search-hadoop.com/m/4cIKs1iwXMS1[agreed] that for issues that 
list multiple releases in their _Fix Version/s_ field, CLOSE the issue on the 
release of any of the versions listed; subsequent change to the issue must 
happen in a new JIRA.
+We agreed that for issues that list multiple releases in their _Fix Version/s_ 
field, CLOSE the issue on the release of any of the versions listed; subsequent 
change to the issue must happen in a new JIRA.
 
 [[no.permanent.state.in.zk]]
 .Only transient state in ZooKeeper!
@@ -99,7 +99,7 @@ NOTE: End-of-life releases are not included in this list.
 [[hbase.commit.msg.format]]
 == Commit Message format
 
-We link:http://search-hadoop.com/m/Gwxwl10cFHa1[agreed] to the following Git 
commit message format:
+We agreed to the following Git commit message format:
 [source]
 
 HBASE-x . ()
diff --git

[hbase] branch master updated: HBASE-25601 Use ASF-official mailing list archives

2021-02-25 Thread elserj
This is an automated email from the ASF dual-hosted git repository.

elserj pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/master by this push:
 new a7d0445  HBASE-25601 Use ASF-official mailing list archives
a7d0445 is described below

commit a7d0445a2111499461500b458a920bf0219a6db1
Author: Josh Elser 
AuthorDate: Wed Feb 24 11:15:30 2021 -0500

HBASE-25601 Use ASF-official mailing list archives

Signed-off-by: Peter Somogyi 
Signed-off-by: Duo Zhang 

Closes #2983
---
 pom.xml  |  2 --
 src/main/asciidoc/_chapters/community.adoc   |  8 
 src/main/asciidoc/_chapters/compression.adoc |  4 +---
 src/main/asciidoc/_chapters/configuration.adoc   |  2 +-
 src/main/asciidoc/_chapters/developer.adoc   |  6 +++---
 src/main/asciidoc/_chapters/ops_mgt.adoc |  1 -
 src/main/asciidoc/_chapters/performance.adoc |  3 +--
 src/main/asciidoc/_chapters/schema_design.adoc   |  4 ++--
 src/main/asciidoc/_chapters/troubleshooting.adoc | 16 +++-
 9 files changed, 15 insertions(+), 31 deletions(-)

diff --git a/pom.xml b/pom.xml
index e4505d6..43b4080 100755
--- a/pom.xml
+++ b/pom.xml
@@ -112,7 +112,6 @@
   
https://lists.apache.org/list.html?u...@hbase.apache.org
   
 
https://dir.gmane.org/gmane.comp.java.hadoop.hbase.user
-
https://search-hadoop.com/?q=fc_project=HBase
   
 
 
@@ -123,7 +122,6 @@
   
https://lists.apache.org/list.html?d...@hbase.apache.org
   
 
https://dir.gmane.org/gmane.comp.java.hadoop.hbase.devel
-
https://search-hadoop.com/?q=fc_project=HBase
   
 
 
diff --git a/src/main/asciidoc/_chapters/community.adoc 
b/src/main/asciidoc/_chapters/community.adoc
index 3db6482..6af5d48 100644
--- a/src/main/asciidoc/_chapters/community.adoc
+++ b/src/main/asciidoc/_chapters/community.adoc
@@ -37,13 +37,13 @@ Just request the name of your branch be added to JIRA up on 
the developer's mail
 Thereafter you can file issues against your feature branch in Apache HBase 
JIRA.
 Your code you keep elsewhere -- it should be public so it can be observed -- 
and you can update dev mailing list on progress.
 When the feature is ready for commit, 3 +1s from committers will get your 
feature merged.
-See link:http://search-hadoop.com/m/asM982C5FkS1[HBase, mail # dev - Thoughts
+See 
link:https://lists.apache.org/thread.html/200513c7e7e4df23c8b9134009d61205c79314e77f222d396006%401346870308%40%3Cdev.hbase.apache.org%3E[HBase,
 mail # dev - Thoughts
   about large feature dev branches]
 
 [[hbase.fix.version.in.jira]]
 .How to set fix version in JIRA on issue resolve
 
-Here is how link:http://search-hadoop.com/m/azemIi5RCJ1[we agreed] to set 
versions in JIRA when we
+Here is how we agreed to set versions in JIRA when we
 resolve an issue. If master is going to be 3.0.0, branch-2 will be 2.4.0, and 
branch-1 will be
 1.7.0 then:
 
@@ -59,7 +59,7 @@ resolve an issue. If master is going to be 3.0.0, branch-2 
will be 2.4.0, and br
 [[hbase.when.to.close.jira]]
 .Policy on when to set a RESOLVED JIRA as CLOSED
 
-We link:http://search-hadoop.com/m/4cIKs1iwXMS1[agreed] that for issues that 
list multiple releases in their _Fix Version/s_ field, CLOSE the issue on the 
release of any of the versions listed; subsequent change to the issue must 
happen in a new JIRA.
+We agreed that for issues that list multiple releases in their _Fix Version/s_ 
field, CLOSE the issue on the release of any of the versions listed; subsequent 
change to the issue must happen in a new JIRA.
 
 [[no.permanent.state.in.zk]]
 .Only transient state in ZooKeeper!
@@ -101,7 +101,7 @@ NOTE: End-of-life releases are not included in this list.
 [[hbase.commit.msg.format]]
 == Commit Message format
 
-We link:http://search-hadoop.com/m/Gwxwl10cFHa1[agreed] to the following Git 
commit message format:
+We agreed to the following Git commit message format:
 [source]
 
 HBASE-x . ()
diff --git a/src/main/asciidoc/_chapters/compression.adoc 
b/src/main/asciidoc/_chapters/compression.adoc
index 9a1bf0c..5e1ff3a 100644
--- a/src/main/asciidoc/_chapters/compression.adoc
+++ b/src/main/asciidoc/_chapters/compression.adoc
@@ -31,8 +31,6 @@
 NOTE: Codecs mentioned in this section are for encoding and decoding data 
blocks or row keys.
 For information about replication codecs, see 
<>.
 
-Some of the information in this section is pulled from a 
link:http://search-hadoop.com/m/lL12B1PFVhp1/v=threaded[discussion] on the 
HBase Development mailing list.
-
 HBase supports several different compression algorithms which can be enabled 
on a ColumnFamily.
 Data block encoding attempts to limit duplication of information in keys, 
taking advantage of some of the fundamental designs and patterns of HBase, such 
as sorted row keys and the schema of a given table.
 Compr
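
As a hedged illustration of the two per-ColumnFamily settings just
described, using the HBase 2.x client API (table and family names here are
made up):

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.compress.Compression;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CompressionExample {
      public static void main(String[] args) {
        // Both compression and data block encoding are set per column family.
        TableDescriptor desc = TableDescriptorBuilder.newBuilder(TableName.valueOf("t1"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
                .setCompressionType(Compression.Algorithm.SNAPPY)
                .setDataBlockEncoding(DataBlockEncoding.FAST_DIFF)
                .build())
            .build();
        // desc would then be passed to Admin.createTable(desc).
      }
    }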

[hbase] branch branch-2.4 updated: HBASE-25601 Use ASF-official mailing list archives

2021-02-25 Thread elserj
This is an automated email from the ASF dual-hosted git repository.

elserj pushed a commit to branch branch-2.4
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2.4 by this push:
 new f368d54  HBASE-25601 Use ASF-official mailing list archives
f368d54 is described below

commit f368d54506a50f6b95633718325f5d58b7a35b52
Author: Josh Elser 
AuthorDate: Wed Feb 24 11:15:30 2021 -0500

HBASE-25601 Use ASF-official mailing list archives

Signed-off-by: Peter Somogyi 
Signed-off-by: Duo Zhang 

Closes #2983
---
 RELEASENOTES.md  |  2 +-
 pom.xml  |  2 --
 src/main/asciidoc/_chapters/community.adoc   |  8 
 src/main/asciidoc/_chapters/compression.adoc |  4 +---
 src/main/asciidoc/_chapters/configuration.adoc   |  2 +-
 src/main/asciidoc/_chapters/developer.adoc   |  8 
 src/main/asciidoc/_chapters/ops_mgt.adoc |  1 -
 src/main/asciidoc/_chapters/performance.adoc |  3 +--
 src/main/asciidoc/_chapters/schema_design.adoc   |  4 ++--
 src/main/asciidoc/_chapters/troubleshooting.adoc | 16 +++-
 10 files changed, 17 insertions(+), 33 deletions(-)

diff --git a/RELEASENOTES.md b/RELEASENOTES.md
index dfd6902..4f37839 100644
--- a/RELEASENOTES.md
+++ b/RELEASENOTES.md
@@ -17931,7 +17931,7 @@ MVCC methods cleaned up. Make a bit more sense now. 
Less of them.
 
 Simplifies our update of MemStore/WAL. Now we update memstore AFTER we add to 
WAL (but before we sync). This fixes possible dataloss when two edits came in 
with same coordinates; we could order the edits in memstore differently to how 
they arrived in the WAL.
 
-Marked as an incompatible change because it breaks Distributed Log Replay, a 
feature we'd determined already was unreliable and to be removed (See 
http://search-hadoop.com/m/YGbbhTJpoal8GD1).
+Marked as an incompatible change because it breaks Distributed Log Replay, a 
feature we'd determined already was unreliable and to be removed.
 
 
 ---
diff --git a/pom.xml b/pom.xml
index ad079c4..4b7bbcd 100755
--- a/pom.xml
+++ b/pom.xml
@@ -111,7 +111,6 @@
   http://mail-archives.apache.org/mod_mbox/hbase-user/
   
 
https://dir.gmane.org/gmane.comp.java.hadoop.hbase.user
-
https://search-hadoop.com/?q=fc_project=HBase
   
 
 
@@ -122,7 +121,6 @@
   http://mail-archives.apache.org/mod_mbox/hbase-dev/
   
 
https://dir.gmane.org/gmane.comp.java.hadoop.hbase.devel
-
https://search-hadoop.com/?q=fc_project=HBase
   
 
 
diff --git a/src/main/asciidoc/_chapters/community.adoc 
b/src/main/asciidoc/_chapters/community.adoc
index ffe209e..91a596d 100644
--- a/src/main/asciidoc/_chapters/community.adoc
+++ b/src/main/asciidoc/_chapters/community.adoc
@@ -37,13 +37,13 @@ Just request the name of your branch be added to JIRA up on 
the developer's mail
 Thereafter you can file issues against your feature branch in Apache HBase 
JIRA.
 Your code you keep elsewhere -- it should be public so it can be observed -- 
and you can update dev mailing list on progress.
 When the feature is ready for commit, 3 +1s from committers will get your 
feature merged.
-See link:http://search-hadoop.com/m/asM982C5FkS1[HBase, mail # dev - Thoughts
+See 
link:https://lists.apache.org/thread.html/200513c7e7e4df23c8b9134009d61205c79314e77f222d396006%401346870308%40%3Cdev.hbase.apache.org%3E[HBase,
 mail # dev - Thoughts
   about large feature dev branches]
 
 [[hbase.fix.version.in.jira]]
 .How to set fix version in JIRA on issue resolve
 
-Here is how link:http://search-hadoop.com/m/azemIi5RCJ1[we agreed] to set 
versions in JIRA when we resolve an issue.
+Here is how we agreed to set versions in JIRA when we resolve an issue.
 If master is going to be 2.0.0, and branch-1 1.4.0 then:
 
 * Commit only to master: Mark with 2.0.0
@@ -54,7 +54,7 @@ If master is going to be 2.0.0, and branch-1 1.4.0 then:
 [[hbase.when.to.close.jira]]
 .Policy on when to set a RESOLVED JIRA as CLOSED
 
-We link:http://search-hadoop.com/m/4cIKs1iwXMS1[agreed] that for issues that 
list multiple releases in their _Fix Version/s_ field, CLOSE the issue on the 
release of any of the versions listed; subsequent change to the issue must 
happen in a new JIRA.
+We agreed that for issues that list multiple releases in their _Fix Version/s_ 
field, CLOSE the issue on the release of any of the versions listed; subsequent 
change to the issue must happen in a new JIRA.
 
 [[no.permanent.state.in.zk]]
 .Only transient state in ZooKeeper!
@@ -99,7 +99,7 @@ NOTE: End-of-life releases are not included in this list.
 [[hbase.commit.msg.format]]
 == Commit Message format
 
-We link:http://search-hadoop.com/m/Gwxwl10cFHa1[agreed] to the following Git 
commit message format:
+We agreed to the following Git commit message format:
 [source]
 
 HBASE-x . ()
diff --git

[hbase] branch branch-2.2 updated: HBASE-25601 Use ASF-official mailing list archives

2021-02-25 Thread elserj
This is an automated email from the ASF dual-hosted git repository.

elserj pushed a commit to branch branch-2.2
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2.2 by this push:
 new 2e09099  HBASE-25601 Use ASF-official mailing list archives
2e09099 is described below

commit 2e09099fc964ea51f6f5c20ced9495de6aeca2ad
Author: Josh Elser 
AuthorDate: Wed Feb 24 11:15:30 2021 -0500

HBASE-25601 Use ASF-official mailing list archives

Signed-off-by: Peter Somogyi 
Signed-off-by: Duo Zhang 

Closes #2983
---
 pom.xml  |  2 --
 src/main/asciidoc/_chapters/community.adoc   |  8 
 src/main/asciidoc/_chapters/compression.adoc |  4 +---
 src/main/asciidoc/_chapters/configuration.adoc   |  8 ++--
 src/main/asciidoc/_chapters/developer.adoc   | 14 ++
 src/main/asciidoc/_chapters/ops_mgt.adoc |  1 -
 src/main/asciidoc/_chapters/performance.adoc |  3 +--
 src/main/asciidoc/_chapters/schema_design.adoc   |  4 ++--
 src/main/asciidoc/_chapters/troubleshooting.adoc | 16 +++-
 9 files changed, 23 insertions(+), 37 deletions(-)

diff --git a/pom.xml b/pom.xml
index 40ee164..e3d0871 100755
--- a/pom.xml
+++ b/pom.xml
@@ -109,7 +109,6 @@
   http://mail-archives.apache.org/mod_mbox/hbase-user/
   
 
https://dir.gmane.org/gmane.comp.java.hadoop.hbase.user
-
https://search-hadoop.com/?q=fc_project=HBase
   
 
 
@@ -120,7 +119,6 @@
   http://mail-archives.apache.org/mod_mbox/hbase-dev/
   
 
https://dir.gmane.org/gmane.comp.java.hadoop.hbase.devel
-
https://search-hadoop.com/?q=fc_project=HBase
   
 
 
diff --git a/src/main/asciidoc/_chapters/community.adoc 
b/src/main/asciidoc/_chapters/community.adoc
index 3a429d6..4f82555 100644
--- a/src/main/asciidoc/_chapters/community.adoc
+++ b/src/main/asciidoc/_chapters/community.adoc
@@ -37,13 +37,13 @@ Just request the name of your branch be added to JIRA up on 
the developer's mail
 Thereafter you can file issues against your feature branch in Apache HBase 
JIRA.
 Your code you keep elsewhere -- it should be public so it can be observed -- 
and you can update dev mailing list on progress.
 When the feature is ready for commit, 3 +1s from committers will get your 
feature merged.
-See link:http://search-hadoop.com/m/asM982C5FkS1[HBase, mail # dev - Thoughts
+See 
link:https://lists.apache.org/thread.html/200513c7e7e4df23c8b9134009d61205c79314e77f222d396006%401346870308%40%3Cdev.hbase.apache.org%3E[HBase,
 mail # dev - Thoughts
   about large feature dev branches]
 
 [[hbase.fix.version.in.jira]]
 .How to set fix version in JIRA on issue resolve
 
-Here is how link:http://search-hadoop.com/m/azemIi5RCJ1[we agreed] to set 
versions in JIRA when we resolve an issue.
+Here is how we agreed to set versions in JIRA when we resolve an issue.
 If master is going to be 2.0.0, and branch-1 1.4.0 then:
 
 * Commit only to master: Mark with 2.0.0
@@ -54,7 +54,7 @@ If master is going to be 2.0.0, and branch-1 1.4.0 then:
 [[hbase.when.to.close.jira]]
 .Policy on when to set a RESOLVED JIRA as CLOSED
 
-We link:http://search-hadoop.com/m/4cIKs1iwXMS1[agreed] that for issues that 
list multiple releases in their _Fix Version/s_ field, CLOSE the issue on the 
release of any of the versions listed; subsequent change to the issue must 
happen in a new JIRA.
+We agreed that for issues that list multiple releases in their _Fix Version/s_ 
field, CLOSE the issue on the release of any of the versions listed; subsequent 
change to the issue must happen in a new JIRA.
 
 [[no.permanent.state.in.zk]]
 .Only transient state in ZooKeeper!
@@ -105,7 +105,7 @@ NOTE: End-of-life releases are not included in this list.
 [[hbase.commit.msg.format]]
 == Commit Message format
 
-We link:http://search-hadoop.com/m/Gwxwl10cFHa1[agreed] to the following Git 
commit message format:
+We agreed to the following Git commit message format:
 [source]
 
 HBASE-x . ()
diff --git a/src/main/asciidoc/_chapters/compression.adoc 
b/src/main/asciidoc/_chapters/compression.adoc
index b2ff5ce..a3082c0 100644
--- a/src/main/asciidoc/_chapters/compression.adoc
+++ b/src/main/asciidoc/_chapters/compression.adoc
@@ -31,8 +31,6 @@
 NOTE: Codecs mentioned in this section are for encoding and decoding data 
blocks or row keys.
 For information about replication codecs, see 
<>.
 
-Some of the information in this section is pulled from a 
link:http://search-hadoop.com/m/lL12B1PFVhp1/v=threaded[discussion] on the 
HBase Development mailing list.
-
 HBase supports several different compression algorithms which can be enabled 
on a ColumnFamily.
 Data block encoding attempts to limit duplication of information in keys, 
taking advantage of some of the fundamental designs and patterns of HBase, such 
as sorted row keys and the schema of a given

[hbase] branch branch-2 updated: HBASE-25601 Use ASF-official mailing list archives

2021-02-25 Thread elserj
This is an automated email from the ASF dual-hosted git repository.

elserj pushed a commit to branch branch-2
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2 by this push:
 new 33c9f77  HBASE-25601 Use ASF-official mailing list archives
33c9f77 is described below

commit 33c9f774d64b3905bb89e9b80ef4551078ca9131
Author: Josh Elser 
AuthorDate: Wed Feb 24 11:15:30 2021 -0500

HBASE-25601 Use ASF-official mailing list archives

Signed-off-by: Peter Somogyi 
Signed-off-by: Duo Zhang 

Closes #2983
---
 RELEASENOTES.md  |  2 +-
 pom.xml  |  2 --
 src/main/asciidoc/_chapters/community.adoc   |  8 
 src/main/asciidoc/_chapters/compression.adoc |  4 +---
 src/main/asciidoc/_chapters/configuration.adoc   |  2 +-
 src/main/asciidoc/_chapters/developer.adoc   |  8 
 src/main/asciidoc/_chapters/ops_mgt.adoc |  1 -
 src/main/asciidoc/_chapters/performance.adoc |  3 +--
 src/main/asciidoc/_chapters/schema_design.adoc   |  4 ++--
 src/main/asciidoc/_chapters/troubleshooting.adoc | 16 +++-
 10 files changed, 17 insertions(+), 33 deletions(-)

diff --git a/RELEASENOTES.md b/RELEASENOTES.md
index 78468fc..527e543 100644
--- a/RELEASENOTES.md
+++ b/RELEASENOTES.md
@@ -8457,7 +8457,7 @@ MVCC methods cleaned up. Make a bit more sense now. Less 
of them.
 
 Simplifies our update of MemStore/WAL. Now we update memstore AFTER we add to 
WAL (but before we sync). This fixes possible dataloss when two edits came in 
with same coordinates; we could order the edits in memstore differently to how 
they arrived in the WAL.
 
-Marked as an incompatible change because it breaks Distributed Log Replay, a 
feature we'd determined already was unreliable and to be removed (See 
http://search-hadoop.com/m/YGbbhTJpoal8GD1).
+Marked as an incompatible change because it breaks Distributed Log Replay, a 
feature we'd determined already was unreliable and to be removed.
 
 
 ---
diff --git a/pom.xml b/pom.xml
index a1c23e2..2e4ca35 100755
--- a/pom.xml
+++ b/pom.xml
@@ -111,7 +111,6 @@
   http://mail-archives.apache.org/mod_mbox/hbase-user/
   
 
https://dir.gmane.org/gmane.comp.java.hadoop.hbase.user
-
https://search-hadoop.com/?q=fc_project=HBase
   
 
 
@@ -122,7 +121,6 @@
   http://mail-archives.apache.org/mod_mbox/hbase-dev/
   
 
https://dir.gmane.org/gmane.comp.java.hadoop.hbase.devel
-
https://search-hadoop.com/?q=fc_project=HBase
   
 
 
diff --git a/src/main/asciidoc/_chapters/community.adoc 
b/src/main/asciidoc/_chapters/community.adoc
index ffe209e..91a596d 100644
--- a/src/main/asciidoc/_chapters/community.adoc
+++ b/src/main/asciidoc/_chapters/community.adoc
@@ -37,13 +37,13 @@ Just request the name of your branch be added to JIRA up on 
the developer's mail
 Thereafter you can file issues against your feature branch in Apache HBase 
JIRA.
 Your code you keep elsewhere -- it should be public so it can be observed -- 
and you can update dev mailing list on progress.
 When the feature is ready for commit, 3 +1s from committers will get your 
feature merged.
-See link:http://search-hadoop.com/m/asM982C5FkS1[HBase, mail # dev - Thoughts
+See 
link:https://lists.apache.org/thread.html/200513c7e7e4df23c8b9134009d61205c79314e77f222d396006%401346870308%40%3Cdev.hbase.apache.org%3E[HBase,
 mail # dev - Thoughts
   about large feature dev branches]
 
 [[hbase.fix.version.in.jira]]
 .How to set fix version in JIRA on issue resolve
 
-Here is how link:http://search-hadoop.com/m/azemIi5RCJ1[we agreed] to set 
versions in JIRA when we resolve an issue.
+Here is how we agreed to set versions in JIRA when we resolve an issue.
 If master is going to be 2.0.0, and branch-1 1.4.0 then:
 
 * Commit only to master: Mark with 2.0.0
@@ -54,7 +54,7 @@ If master is going to be 2.0.0, and branch-1 1.4.0 then:
 [[hbase.when.to.close.jira]]
 .Policy on when to set a RESOLVED JIRA as CLOSED
 
-We link:http://search-hadoop.com/m/4cIKs1iwXMS1[agreed] that for issues that 
list multiple releases in their _Fix Version/s_ field, CLOSE the issue on the 
release of any of the versions listed; subsequent change to the issue must 
happen in a new JIRA.
+We agreed that for issues that list multiple releases in their _Fix Version/s_ 
field, CLOSE the issue on the release of any of the versions listed; subsequent 
change to the issue must happen in a new JIRA.
 
 [[no.permanent.state.in.zk]]
 .Only transient state in ZooKeeper!
@@ -99,7 +99,7 @@ NOTE: End-of-life releases are not included in this list.
 [[hbase.commit.msg.format]]
 == Commit Message format
 
-We link:http://search-hadoop.com/m/Gwxwl10cFHa1[agreed] to the following Git 
commit message format:
+We agreed to the following Git commit message format:
 [source]
 
 HBASE-x . ()
diff --git a/src

[hbase] branch branch-1.4 updated: HBASE-25601 Use ASF-official mailing list archives

2021-02-25 Thread elserj
This is an automated email from the ASF dual-hosted git repository.

elserj pushed a commit to branch branch-1.4
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-1.4 by this push:
 new 998b794  HBASE-25601 Use ASF-official mailing list archives
998b794 is described below

commit 998b794aabd0f52fde162ce1d1c373a5682107d9
Author: Josh Elser 
AuthorDate: Wed Feb 24 11:15:30 2021 -0500

HBASE-25601 Use ASF-official mailing list archives

Signed-off-by: Peter Somogyi 
Signed-off-by: Duo Zhang 

Closes #2983
---
 pom.xml  |  2 --
 src/main/asciidoc/_chapters/community.adoc   |  8 
 src/main/asciidoc/_chapters/compression.adoc |  4 +---
 src/main/asciidoc/_chapters/configuration.adoc   | 15 +--
 src/main/asciidoc/_chapters/developer.adoc   | 14 ++
 src/main/asciidoc/_chapters/ops_mgt.adoc |  1 -
 src/main/asciidoc/_chapters/performance.adoc |  3 +--
 src/main/asciidoc/_chapters/schema_design.adoc   |  4 ++--
 src/main/asciidoc/_chapters/troubleshooting.adoc | 20 ++--
 9 files changed, 29 insertions(+), 42 deletions(-)

diff --git a/pom.xml b/pom.xml
index 9e51808..5bc05e8 100644
--- a/pom.xml
+++ b/pom.xml
@@ -119,7 +119,6 @@
   https://mail-archives.apache.org/mod_mbox/hbase-user/
   
 
https://dir.gmane.org/gmane.comp.java.hadoop.hbase.user
-
https://search-hadoop.com/?q=fc_project=HBase
   
 
 
@@ -130,7 +129,6 @@
   https://mail-archives.apache.org/mod_mbox/hbase-dev/
   
 
https://dir.gmane.org/gmane.comp.java.hadoop.hbase.devel
-
https://search-hadoop.com/?q=fc_project=HBase
   
 
 
diff --git a/src/main/asciidoc/_chapters/community.adoc 
b/src/main/asciidoc/_chapters/community.adoc
index 4b91b0d..5739df19 100644
--- a/src/main/asciidoc/_chapters/community.adoc
+++ b/src/main/asciidoc/_chapters/community.adoc
@@ -37,7 +37,7 @@ Just request the name of your branch be added to JIRA up on 
the developer's mail
 Thereafter you can file issues against your feature branch in Apache HBase 
JIRA.
 Your code you keep elsewhere -- it should be public so it can be observed -- 
and you can update dev mailing list on progress.
 When the feature is ready for commit, 3 +1s from committers will get your 
feature merged.
-See link:http://search-hadoop.com/m/asM982C5FkS1[HBase, mail # dev - Thoughts
+See 
link:https://lists.apache.org/thread.html/200513c7e7e4df23c8b9134009d61205c79314e77f222d396006%401346870308%40%3Cdev.hbase.apache.org%3E[HBase,
 mail # dev - Thoughts
   about large feature dev branches]
 
 [[patchplusonepolicy]]
@@ -61,7 +61,7 @@ Any -1 on a patch by anyone vetos a patch; it cannot be 
committed until the just
 [[hbase.fix.version.in.jira]]
 .How to set fix version in JIRA on issue resolve
 
-Here is how link:http://search-hadoop.com/m/azemIi5RCJ1[we agreed] to set 
versions in JIRA when we resolve an issue.
+Here is how we agreed to set versions in JIRA when we resolve an issue.
 If trunk is going to be 0.98.0 then: 
 
 * Commit only to trunk: Mark with 0.98 
@@ -73,7 +73,7 @@ If trunk is going to be 0.98.0 then:
 [[hbase.when.to.close.jira]]
 .Policy on when to set a RESOLVED JIRA as CLOSED
 
-We link:http://search-hadoop.com/m/4cIKs1iwXMS1[agreed] that for issues that 
list multiple releases in their _Fix Version/s_ field, CLOSE the issue on the 
release of any of the versions listed; subsequent change to the issue must 
happen in a new JIRA. 
+We agreed that for issues that list multiple releases in their _Fix Version/s_ 
field, CLOSE the issue on the release of any of the versions listed; subsequent 
change to the issue must happen in a new JIRA. 
 
 [[no.permanent.state.in.zk]]
 .Only transient state in ZooKeeper!
@@ -103,7 +103,7 @@ Owners do not need to be committers.
 [[hbase.commit.msg.format]]
 == Commit Message format
 
-We link:http://search-hadoop.com/m/Gwxwl10cFHa1[agreed] to the following SVN 
commit message format: 
+We agreed to the following Git commit message format:
 [source]
 
 HBASE-x . ()
diff --git a/src/main/asciidoc/_chapters/compression.adoc 
b/src/main/asciidoc/_chapters/compression.adoc
index 78fc6a2..e1ed4dc 100644
--- a/src/main/asciidoc/_chapters/compression.adoc
+++ b/src/main/asciidoc/_chapters/compression.adoc
@@ -31,8 +31,6 @@
 NOTE: Codecs mentioned in this section are for encoding and decoding data 
blocks or row keys.
 For information about replication codecs, see 
<>.
 
-Some of the information in this section is pulled from a 
link:http://search-hadoop.com/m/lL12B1PFVhp1/v=threaded[discussion] on the 
HBase Development mailing list.
-
 HBase supports several different compression algorithms which can be enabled 
on a ColumnFamily.
 Data block encoding attempts to limit duplication of information in keys, 
taking advantage of some of the fundamental d

[hbase] branch branch-1 updated: HBASE-25601 Use ASF-official mailing list archives

2021-02-25 Thread elserj
This is an automated email from the ASF dual-hosted git repository.

elserj pushed a commit to branch branch-1
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-1 by this push:
 new a7574ec  HBASE-25601 Use ASF-official mailing list archives
a7574ec is described below

commit a7574ec9a0b22d7999bbb82173cbee58517d2849
Author: Josh Elser 
AuthorDate: Wed Feb 24 11:15:30 2021 -0500

HBASE-25601 Use ASF-official mailing list archives

Signed-off-by: Peter Somogyi 
Signed-off-by: Duo Zhang 

Closes #2983
---
 pom.xml  |  2 --
 src/main/asciidoc/_chapters/community.adoc   |  8 
 src/main/asciidoc/_chapters/compression.adoc |  4 +---
 src/main/asciidoc/_chapters/configuration.adoc   |  8 ++--
 src/main/asciidoc/_chapters/developer.adoc   | 14 ++
 src/main/asciidoc/_chapters/ops_mgt.adoc |  1 -
 src/main/asciidoc/_chapters/performance.adoc |  3 +--
 src/main/asciidoc/_chapters/schema_design.adoc   |  4 ++--
 src/main/asciidoc/_chapters/troubleshooting.adoc | 16 +++-
 9 files changed, 23 insertions(+), 37 deletions(-)

diff --git a/pom.xml b/pom.xml
index e896df5..ae4dabe 100644
--- a/pom.xml
+++ b/pom.xml
@@ -119,7 +119,6 @@
   https://mail-archives.apache.org/mod_mbox/hbase-user/
   
 
https://dir.gmane.org/gmane.comp.java.hadoop.hbase.user
-
https://search-hadoop.com/?q=fc_project=HBase
   
 
 
@@ -130,7 +129,6 @@
   https://mail-archives.apache.org/mod_mbox/hbase-dev/
   
 
https://dir.gmane.org/gmane.comp.java.hadoop.hbase.devel
-
https://search-hadoop.com/?q=fc_project=HBase
   
 
 
diff --git a/src/main/asciidoc/_chapters/community.adoc 
b/src/main/asciidoc/_chapters/community.adoc
index 4b91b0d..5739df19 100644
--- a/src/main/asciidoc/_chapters/community.adoc
+++ b/src/main/asciidoc/_chapters/community.adoc
@@ -37,7 +37,7 @@ Just request the name of your branch be added to JIRA up on 
the developer's mail
 Thereafter you can file issues against your feature branch in Apache HBase 
JIRA.
 Your code you keep elsewhere -- it should be public so it can be observed -- 
and you can update dev mailing list on progress.
 When the feature is ready for commit, 3 +1s from committers will get your 
feature merged.
-See link:http://search-hadoop.com/m/asM982C5FkS1[HBase, mail # dev - Thoughts
+See 
link:https://lists.apache.org/thread.html/200513c7e7e4df23c8b9134009d61205c79314e77f222d396006%401346870308%40%3Cdev.hbase.apache.org%3E[HBase,
 mail # dev - Thoughts
   about large feature dev branches]
 
 [[patchplusonepolicy]]
@@ -61,7 +61,7 @@ Any -1 on a patch by anyone vetos a patch; it cannot be 
committed until the just
 [[hbase.fix.version.in.jira]]
 .How to set fix version in JIRA on issue resolve
 
-Here is how link:http://search-hadoop.com/m/azemIi5RCJ1[we agreed] to set 
versions in JIRA when we resolve an issue.
+Here is how we agreed to set versions in JIRA when we resolve an issue.
 If trunk is going to be 0.98.0 then: 
 
 * Commit only to trunk: Mark with 0.98 
@@ -73,7 +73,7 @@ If trunk is going to be 0.98.0 then:
 [[hbase.when.to.close.jira]]
 .Policy on when to set a RESOLVED JIRA as CLOSED
 
-We link:http://search-hadoop.com/m/4cIKs1iwXMS1[agreed] that for issues that 
list multiple releases in their _Fix Version/s_ field, CLOSE the issue on the 
release of any of the versions listed; subsequent change to the issue must 
happen in a new JIRA. 
+We agreed that for issues that list multiple releases in their _Fix Version/s_ 
field, CLOSE the issue on the release of any of the versions listed; subsequent 
change to the issue must happen in a new JIRA. 
 
 [[no.permanent.state.in.zk]]
 .Only transient state in ZooKeeper!
@@ -103,7 +103,7 @@ Owners do not need to be committers.
 [[hbase.commit.msg.format]]
 == Commit Message format
 
-We link:http://search-hadoop.com/m/Gwxwl10cFHa1[agreed] to the following SVN 
commit message format: 
+We agreed to the following Git commit message format:
 [source]
 
 HBASE-x . ()
diff --git a/src/main/asciidoc/_chapters/compression.adoc 
b/src/main/asciidoc/_chapters/compression.adoc
index 78fc6a2..e1ed4dc 100644
--- a/src/main/asciidoc/_chapters/compression.adoc
+++ b/src/main/asciidoc/_chapters/compression.adoc
@@ -31,8 +31,6 @@
 NOTE: Codecs mentioned in this section are for encoding and decoding data 
blocks or row keys.
 For information about replication codecs, see 
<>.
 
-Some of the information in this section is pulled from a 
link:http://search-hadoop.com/m/lL12B1PFVhp1/v=threaded[discussion] on the 
HBase Development mailing list.
-
 HBase supports several different compression algorithms which can be enabled 
on a ColumnFamily.
 Data block encoding attempts to limit duplication of information in keys, 
taking advantage of some of the fundamental designs and patterns of

[hbase] branch master updated: HBASE-25449 'dfs.client.read.shortcircuit' should not be set in hbase-default.xml

2021-01-13 Thread elserj
This is an automated email from the ASF dual-hosted git repository.

elserj pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/master by this push:
 new 3488c44  HBASE-25449 'dfs.client.read.shortcircuit' should not be set 
in hbase-default.xml
3488c44 is described below

commit 3488c44a21612aae1835fc3e91a4a12ed2abb8b7
Author: 申胜利 <48829688+shenshen...@users.noreply.github.com>
AuthorDate: Tue Jan 12 09:06:13 2021 -0500

HBASE-25449 'dfs.client.read.shortcircuit' should not be set in 
hbase-default.xml

Revert of the revert -- re-applying HBASE-25449 with a change
of renaming the test hdfs XML configuration file as it was adversely
affecting tests using MiniDFS

This reverts commit c218e576fe54df208e277365f1ac24f993f2a4b1.
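
The mechanics the new test relies on: Configuration.addDefaultResource
appends a classpath resource whose values override earlier default
resources (hbase-default.xml included), and getPropertySources reports
which file supplied the effective value. A minimal sketch against the
hdfs-scr-enabled.xml test resource added by this commit:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class ShortCircuitSourceCheck {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Later default resources win for the keys they define, so the
        // empty values in hbase-default.xml no longer mask HDFS settings.
        Configuration.addDefaultResource("hdfs-scr-enabled.xml");
        System.out.println(conf.get("dfs.client.read.shortcircuit"));    // true
        System.out.println(
            conf.getPropertySources("dfs.client.read.shortcircuit")[0]); // hdfs-scr-enabled.xml
      }
    }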

Co-authored-by: Josh Elser 

Signed-off-by: Peter Somogyi 
Signed-off-by: Michael Stack 
Signed-off-by: Duo Zhang 
---
 hbase-common/src/main/resources/hbase-default.xml  |  4 +--
 .../hadoop/hbase/TestHBaseConfiguration.java   | 17 +
 .../src/test/resources/hdfs-scr-disabled.xml   | 42 ++
 .../src/test/resources/hdfs-scr-enabled.xml| 42 ++
 4 files changed, 103 insertions(+), 2 deletions(-)

diff --git a/hbase-common/src/main/resources/hbase-default.xml 
b/hbase-common/src/main/resources/hbase-default.xml
index 9092dd1..20f3881 100644
--- a/hbase-common/src/main/resources/hbase-default.xml
+++ b/hbase-common/src/main/resources/hbase-default.xml
@@ -1461,7 +1461,7 @@ possible configurations would overwhelm and obscure the 
important.
   
   
 dfs.client.read.shortcircuit
-false
+
 
   If set to true, this configuration parameter enables short-circuit local
   reads.
@@ -1469,7 +1469,7 @@ possible configurations would overwhelm and obscure the 
important.
   
   
 dfs.domain.socket.path
-none
+
 
   This is a path to a UNIX domain socket that will be used for
   communication between the DataNode and local HDFS clients, if
diff --git 
a/hbase-common/src/test/java/org/apache/hadoop/hbase/TestHBaseConfiguration.java
 
b/hbase-common/src/test/java/org/apache/hadoop/hbase/TestHBaseConfiguration.java
index 6a0b428..ffa94ba 100644
--- 
a/hbase-common/src/test/java/org/apache/hadoop/hbase/TestHBaseConfiguration.java
+++ 
b/hbase-common/src/test/java/org/apache/hadoop/hbase/TestHBaseConfiguration.java
@@ -116,6 +116,23 @@ public class TestHBaseConfiguration {
 Assert.assertTrue(User.isHBaseSecurityEnabled(conf));
   }
 
+  @Test
+  public void testGetConfigOfShortcircuitRead() throws Exception {
+Configuration conf = HBaseConfiguration.create();
+Configuration.addDefaultResource("hdfs-scr-disabled.xml");
+assertEquals("hdfs-scr-disabled.xml",
+conf.getPropertySources("dfs.client.read.shortcircuit")[0]);
+assertEquals("false", conf.get("dfs.client.read.shortcircuit"));
+assertNull(conf.get("dfs.domain.socket.path"));
+Configuration.addDefaultResource("hdfs-scr-enabled.xml");
+assertEquals("hdfs-scr-enabled.xml",
+conf.getPropertySources("dfs.client.read.shortcircuit")[0]);
+assertEquals("hdfs-scr-enabled.xml",
+conf.getPropertySources("dfs.domain.socket.path")[0]);
+assertEquals("true", conf.get("dfs.client.read.shortcircuit"));
+assertEquals("/var/lib/hadoop-hdfs/dn_socket", 
conf.get("dfs.domain.socket.path"));
+  }
+
   private static class ReflectiveCredentialProviderClient {
 public static final String HADOOP_CRED_PROVIDER_FACTORY_CLASS_NAME =
 "org.apache.hadoop.security.alias.JavaKeyStoreProvider$Factory";
diff --git a/hbase-common/src/test/resources/hdfs-scr-disabled.xml 
b/hbase-common/src/test/resources/hdfs-scr-disabled.xml
new file mode 100644
index 000..fdb3c36
--- /dev/null
+++ b/hbase-common/src/test/resources/hdfs-scr-disabled.xml
@@ -0,0 +1,42 @@
+
+
+
+
+
+  
+dfs.client.read.shortcircuit
+false
+
+  If set to true, this configuration parameter enables short-circuit local
+  reads.
+
+  
+  
+dfs.domain.socket.path
+
+
+Optional.  This is a path to a UNIX domain socket that will be used for
+  communication between the DataNode and local HDFS clients.
+  If the string "_PORT" is present in this path, it will be replaced by the
+  TCP port of the DataNode.
+
+  
+
diff --git a/hbase-common/src/test/resources/hdfs-scr-enabled.xml 
b/hbase-common/src/test/resources/hdfs-scr-enabled.xml
new file mode 100644
index 000..8594494
--- /dev/null
+++ b/hbase-common/src/test/resources/hdfs-scr-enabled.xml
@@ -0,0 +1,42 @@
+
+
+
+
+
+  
+dfs.client.read.shortcircuit
+true
+
+  If set t

[hbase] branch branch-2.4 updated: HBASE-25449 'dfs.client.read.shortcircuit' should not be set in hbase-default.xml

2021-01-13 Thread elserj
This is an automated email from the ASF dual-hosted git repository.

elserj pushed a commit to branch branch-2.4
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2.4 by this push:
 new f429cfa  HBASE-25449 'dfs.client.read.shortcircuit' should not be set 
in hbase-default.xml
f429cfa is described below

commit f429cfac9a6b7d7f13c6475f9521dce4795e15cd
Author: 申胜利 <48829688+shenshen...@users.noreply.github.com>
AuthorDate: Tue Jan 12 09:06:13 2021 -0500

HBASE-25449 'dfs.client.read.shortcircuit' should not be set in 
hbase-default.xml

Revert of the revert -- re-applying HBASE-25449 with a change
of renaming the test hdfs XML configuration file as it was adversely
affecting tests using MiniDFS

This reverts commit c218e576fe54df208e277365f1ac24f993f2a4b1.

Co-authored-by: Josh Elser 

Signed-off-by: Peter Somogyi 
Signed-off-by: Michael Stack 
Signed-off-by: Duo Zhang 
---
 hbase-common/src/main/resources/hbase-default.xml  |  4 +--
 .../hadoop/hbase/TestHBaseConfiguration.java   | 17 +
 .../src/test/resources/hdfs-scr-disabled.xml   | 42 ++
 .../src/test/resources/hdfs-scr-enabled.xml| 42 ++
 4 files changed, 103 insertions(+), 2 deletions(-)

diff --git a/hbase-common/src/main/resources/hbase-default.xml 
b/hbase-common/src/main/resources/hbase-default.xml
index a1827a2..df94c5b 100644
--- a/hbase-common/src/main/resources/hbase-default.xml
+++ b/hbase-common/src/main/resources/hbase-default.xml
@@ -1442,7 +1442,7 @@ possible configurations would overwhelm and obscure the 
important.
   
   
 dfs.client.read.shortcircuit
-false
+
 
   If set to true, this configuration parameter enables short-circuit local
   reads.
@@ -1450,7 +1450,7 @@ possible configurations would overwhelm and obscure the 
important.
   
   
 dfs.domain.socket.path
-none
+
 
   This is a path to a UNIX domain socket that will be used for
   communication between the DataNode and local HDFS clients, if
diff --git 
a/hbase-common/src/test/java/org/apache/hadoop/hbase/TestHBaseConfiguration.java
 
b/hbase-common/src/test/java/org/apache/hadoop/hbase/TestHBaseConfiguration.java
index 6a0b428..ffa94ba 100644
--- 
a/hbase-common/src/test/java/org/apache/hadoop/hbase/TestHBaseConfiguration.java
+++ 
b/hbase-common/src/test/java/org/apache/hadoop/hbase/TestHBaseConfiguration.java
@@ -116,6 +116,23 @@ public class TestHBaseConfiguration {
 Assert.assertTrue(User.isHBaseSecurityEnabled(conf));
   }
 
+  @Test
+  public void testGetConfigOfShortcircuitRead() throws Exception {
+Configuration conf = HBaseConfiguration.create();
+Configuration.addDefaultResource("hdfs-scr-disabled.xml");
+assertEquals("hdfs-scr-disabled.xml",
+conf.getPropertySources("dfs.client.read.shortcircuit")[0]);
+assertEquals("false", conf.get("dfs.client.read.shortcircuit"));
+assertNull(conf.get("dfs.domain.socket.path"));
+Configuration.addDefaultResource("hdfs-scr-enabled.xml");
+assertEquals("hdfs-scr-enabled.xml",
+conf.getPropertySources("dfs.client.read.shortcircuit")[0]);
+assertEquals("hdfs-scr-enabled.xml",
+conf.getPropertySources("dfs.domain.socket.path")[0]);
+assertEquals("true", conf.get("dfs.client.read.shortcircuit"));
+assertEquals("/var/lib/hadoop-hdfs/dn_socket", 
conf.get("dfs.domain.socket.path"));
+  }
+
   private static class ReflectiveCredentialProviderClient {
 public static final String HADOOP_CRED_PROVIDER_FACTORY_CLASS_NAME =
 "org.apache.hadoop.security.alias.JavaKeyStoreProvider$Factory";
diff --git a/hbase-common/src/test/resources/hdfs-scr-disabled.xml 
b/hbase-common/src/test/resources/hdfs-scr-disabled.xml
new file mode 100644
index 000..fdb3c36
--- /dev/null
+++ b/hbase-common/src/test/resources/hdfs-scr-disabled.xml
@@ -0,0 +1,42 @@
+
+
+
+
+
+  
+dfs.client.read.shortcircuit
+false
+
+  If set to true, this configuration parameter enables short-circuit local
+  reads.
+
+  
+  
+dfs.domain.socket.path
+
+
+Optional.  This is a path to a UNIX domain socket that will be used for
+  communication between the DataNode and local HDFS clients.
+  If the string "_PORT" is present in this path, it will be replaced by the
+  TCP port of the DataNode.
+
+  
+
diff --git a/hbase-common/src/test/resources/hdfs-scr-enabled.xml 
b/hbase-common/src/test/resources/hdfs-scr-enabled.xml
new file mode 100644
index 000..8594494
--- /dev/null
+++ b/hbase-common/src/test/resources/hdfs-scr-enabled.xml
@@ -0,0 +1,42 @@
+
+
+
+
+
+  
+dfs.client.read.shortcircuit
+true
+
+  If set t

[hbase] branch branch-2.3 updated: HBASE-25449 'dfs.client.read.shortcircuit' should not be set in hbase-default.xml

2021-01-13 Thread elserj
This is an automated email from the ASF dual-hosted git repository.

elserj pushed a commit to branch branch-2.3
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2.3 by this push:
 new 5bf4a1a  HBASE-25449 'dfs.client.read.shortcircuit' should not be set 
in hbase-default.xml
5bf4a1a is described below

commit 5bf4a1a3791d06ceabee918e4eaa801266f90a7f
Author: 申胜利 <48829688+shenshen...@users.noreply.github.com>
AuthorDate: Tue Jan 12 09:06:13 2021 -0500

HBASE-25449 'dfs.client.read.shortcircuit' should not be set in 
hbase-default.xml

Revert of the revert -- re-applying HBASE-25449 with a change
of renaming the test hdfs XML configuration file as it was adversely
affecting tests using MiniDFS

This reverts commit c218e576fe54df208e277365f1ac24f993f2a4b1.

Co-authored-by: Josh Elser 

Signed-off-by: Peter Somogyi 
Signed-off-by: Michael Stack 
Signed-off-by: Duo Zhang 
---
 hbase-common/src/main/resources/hbase-default.xml  |  4 +--
 .../hadoop/hbase/TestHBaseConfiguration.java   | 17 +
 .../src/test/resources/hdfs-scr-disabled.xml   | 42 ++
 .../src/test/resources/hdfs-scr-enabled.xml| 42 ++
 4 files changed, 103 insertions(+), 2 deletions(-)

diff --git a/hbase-common/src/main/resources/hbase-default.xml 
b/hbase-common/src/main/resources/hbase-default.xml
index 6f84eba..62cf647 100644
--- a/hbase-common/src/main/resources/hbase-default.xml
+++ b/hbase-common/src/main/resources/hbase-default.xml
@@ -1437,7 +1437,7 @@ possible configurations would overwhelm and obscure the 
important.
   
   
 dfs.client.read.shortcircuit
-false
+
 
   If set to true, this configuration parameter enables short-circuit local
   reads.
@@ -1445,7 +1445,7 @@ possible configurations would overwhelm and obscure the 
important.
   
   
 dfs.domain.socket.path
-none
+
 
   This is a path to a UNIX domain socket that will be used for
   communication between the DataNode and local HDFS clients, if
diff --git 
a/hbase-common/src/test/java/org/apache/hadoop/hbase/TestHBaseConfiguration.java
 
b/hbase-common/src/test/java/org/apache/hadoop/hbase/TestHBaseConfiguration.java
index 6a0b428..ffa94ba 100644
--- 
a/hbase-common/src/test/java/org/apache/hadoop/hbase/TestHBaseConfiguration.java
+++ 
b/hbase-common/src/test/java/org/apache/hadoop/hbase/TestHBaseConfiguration.java
@@ -116,6 +116,23 @@ public class TestHBaseConfiguration {
 Assert.assertTrue(User.isHBaseSecurityEnabled(conf));
   }
 
+  @Test
+  public void testGetConfigOfShortcircuitRead() throws Exception {
+Configuration conf = HBaseConfiguration.create();
+Configuration.addDefaultResource("hdfs-scr-disabled.xml");
+assertEquals("hdfs-scr-disabled.xml",
+conf.getPropertySources("dfs.client.read.shortcircuit")[0]);
+assertEquals("false", conf.get("dfs.client.read.shortcircuit"));
+assertNull(conf.get("dfs.domain.socket.path"));
+Configuration.addDefaultResource("hdfs-scr-enabled.xml");
+assertEquals("hdfs-scr-enabled.xml",
+conf.getPropertySources("dfs.client.read.shortcircuit")[0]);
+assertEquals("hdfs-scr-enabled.xml",
+conf.getPropertySources("dfs.domain.socket.path")[0]);
+assertEquals("true", conf.get("dfs.client.read.shortcircuit"));
+assertEquals("/var/lib/hadoop-hdfs/dn_socket", 
conf.get("dfs.domain.socket.path"));
+  }
+
   private static class ReflectiveCredentialProviderClient {
 public static final String HADOOP_CRED_PROVIDER_FACTORY_CLASS_NAME =
 "org.apache.hadoop.security.alias.JavaKeyStoreProvider$Factory";
diff --git a/hbase-common/src/test/resources/hdfs-scr-disabled.xml 
b/hbase-common/src/test/resources/hdfs-scr-disabled.xml
new file mode 100644
index 000..fdb3c36
--- /dev/null
+++ b/hbase-common/src/test/resources/hdfs-scr-disabled.xml
@@ -0,0 +1,42 @@
+
+
+
+
+
+  
+dfs.client.read.shortcircuit
+false
+
+  If set to true, this configuration parameter enables short-circuit local
+  reads.
+
+  
+  
+dfs.domain.socket.path
+
+
+Optional.  This is a path to a UNIX domain socket that will be used for
+  communication between the DataNode and local HDFS clients.
+  If the string "_PORT" is present in this path, it will be replaced by the
+  TCP port of the DataNode.
+
+  
+
diff --git a/hbase-common/src/test/resources/hdfs-scr-enabled.xml 
b/hbase-common/src/test/resources/hdfs-scr-enabled.xml
new file mode 100644
index 000..8594494
--- /dev/null
+++ b/hbase-common/src/test/resources/hdfs-scr-enabled.xml
@@ -0,0 +1,42 @@
+
+
+
+
+
+  
+dfs.client.read.shortcircuit
+true
+
+  If set t

[hbase] branch branch-2 updated: HBASE-25449 'dfs.client.read.shortcircuit' should not be set in hbase-default.xml

2021-01-13 Thread elserj
This is an automated email from the ASF dual-hosted git repository.

elserj pushed a commit to branch branch-2
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2 by this push:
 new 478c4e8  HBASE-25449 'dfs.client.read.shortcircuit' should not be set 
in hbase-default.xml
478c4e8 is described below

commit 478c4e8d6a64fbc5520717e630e3d60bac380aec
Author: 申胜利 <48829688+shenshen...@users.noreply.github.com>
AuthorDate: Tue Jan 12 09:06:13 2021 -0500

HBASE-25449 'dfs.client.read.shortcircuit' should not be set in 
hbase-default.xml

Revert of the revert -- re-applying HBASE-25449 with a change
of renaming the test hdfs XML configuration file as it was adversely
affecting tests using MiniDFS

This reverts commit c218e576fe54df208e277365f1ac24f993f2a4b1.

Co-authored-by: Josh Elser 

Signed-off-by: Peter Somogyi 
Signed-off-by: Michael Stack 
Signed-off-by: Duo Zhang 
---
 hbase-common/src/main/resources/hbase-default.xml  |  4 +--
 .../hadoop/hbase/TestHBaseConfiguration.java   | 17 +
 .../src/test/resources/hdfs-scr-disabled.xml   | 42 ++
 .../src/test/resources/hdfs-scr-enabled.xml| 42 ++
 4 files changed, 103 insertions(+), 2 deletions(-)

diff --git a/hbase-common/src/main/resources/hbase-default.xml 
b/hbase-common/src/main/resources/hbase-default.xml
index db1e3df..0908ca6 100644
--- a/hbase-common/src/main/resources/hbase-default.xml
+++ b/hbase-common/src/main/resources/hbase-default.xml
@@ -1440,7 +1440,7 @@ possible configurations would overwhelm and obscure the 
important.
   
   
 dfs.client.read.shortcircuit
-false
+
 
   If set to true, this configuration parameter enables short-circuit local
   reads.
@@ -1448,7 +1448,7 @@ possible configurations would overwhelm and obscure the 
important.
   
   
 dfs.domain.socket.path
-none
+
 
   This is a path to a UNIX domain socket that will be used for
   communication between the DataNode and local HDFS clients, if
diff --git 
a/hbase-common/src/test/java/org/apache/hadoop/hbase/TestHBaseConfiguration.java
 
b/hbase-common/src/test/java/org/apache/hadoop/hbase/TestHBaseConfiguration.java
index 6a0b428..ffa94ba 100644
--- 
a/hbase-common/src/test/java/org/apache/hadoop/hbase/TestHBaseConfiguration.java
+++ 
b/hbase-common/src/test/java/org/apache/hadoop/hbase/TestHBaseConfiguration.java
@@ -116,6 +116,23 @@ public class TestHBaseConfiguration {
 Assert.assertTrue(User.isHBaseSecurityEnabled(conf));
   }
 
+  @Test
+  public void testGetConfigOfShortcircuitRead() throws Exception {
+Configuration conf = HBaseConfiguration.create();
+Configuration.addDefaultResource("hdfs-scr-disabled.xml");
+assertEquals("hdfs-scr-disabled.xml",
+conf.getPropertySources("dfs.client.read.shortcircuit")[0]);
+assertEquals("false", conf.get("dfs.client.read.shortcircuit"));
+assertNull(conf.get("dfs.domain.socket.path"));
+Configuration.addDefaultResource("hdfs-scr-enabled.xml");
+assertEquals("hdfs-scr-enabled.xml",
+conf.getPropertySources("dfs.client.read.shortcircuit")[0]);
+assertEquals("hdfs-scr-enabled.xml",
+conf.getPropertySources("dfs.domain.socket.path")[0]);
+assertEquals("true", conf.get("dfs.client.read.shortcircuit"));
+assertEquals("/var/lib/hadoop-hdfs/dn_socket", 
conf.get("dfs.domain.socket.path"));
+  }
+
   private static class ReflectiveCredentialProviderClient {
 public static final String HADOOP_CRED_PROVIDER_FACTORY_CLASS_NAME =
 "org.apache.hadoop.security.alias.JavaKeyStoreProvider$Factory";
diff --git a/hbase-common/src/test/resources/hdfs-scr-disabled.xml 
b/hbase-common/src/test/resources/hdfs-scr-disabled.xml
new file mode 100644
index 000..fdb3c36
--- /dev/null
+++ b/hbase-common/src/test/resources/hdfs-scr-disabled.xml
@@ -0,0 +1,42 @@
+
+
+
+
+
+  
+dfs.client.read.shortcircuit
+false
+
+  If set to true, this configuration parameter enables short-circuit local
+  reads.
+
+  
+  
+dfs.domain.socket.path
+
+
+Optional.  This is a path to a UNIX domain socket that will be used for
+  communication between the DataNode and local HDFS clients.
+  If the string "_PORT" is present in this path, it will be replaced by the
+  TCP port of the DataNode.
+
+  
+
diff --git a/hbase-common/src/test/resources/hdfs-scr-enabled.xml 
b/hbase-common/src/test/resources/hdfs-scr-enabled.xml
new file mode 100644
index 000..8594494
--- /dev/null
+++ b/hbase-common/src/test/resources/hdfs-scr-enabled.xml
@@ -0,0 +1,42 @@
+
+
+
+
+
+  
+dfs.client.read.shortcircuit
+true
+
+  If set t

[hbase] branch branch-2.2 updated: HBASE-25449 'dfs.client.read.shortcircuit' should not be set in hbase-default.xml

2021-01-13 Thread elserj
This is an automated email from the ASF dual-hosted git repository.

elserj pushed a commit to branch branch-2.2
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2.2 by this push:
 new 56b1df0  HBASE-25449 'dfs.client.read.shortcircuit' should not be set 
in hbase-default.xml
56b1df0 is described below

commit 56b1df0f099abf23c3e92ee046f0e0f71ef8a458
Author: 申胜利 <48829688+shenshen...@users.noreply.github.com>
AuthorDate: Tue Jan 12 09:06:13 2021 -0500

HBASE-25449 'dfs.client.read.shortcircuit' should not be set in 
hbase-default.xml

Revert of the revert -- re-applying HBASE-25449 with a change
of renaming the test hdfs XML configuration file as it was adversely
affecting tests using MiniDFS

This reverts commit c218e576fe54df208e277365f1ac24f993f2a4b1.

Co-authored-by: Josh Elser 

Signed-off-by: Peter Somogyi 
Signed-off-by: Michael Stack 
Signed-off-by: Duo Zhang 
---
 hbase-common/src/main/resources/hbase-default.xml  |  4 +--
 .../hadoop/hbase/TestHBaseConfiguration.java   | 17 +
 .../src/test/resources/hdfs-scr-disabled.xml   | 42 ++
 .../src/test/resources/hdfs-scr-enabled.xml| 42 ++
 4 files changed, 103 insertions(+), 2 deletions(-)

diff --git a/hbase-common/src/main/resources/hbase-default.xml 
b/hbase-common/src/main/resources/hbase-default.xml
index 367b24a..435f4b4 100644
--- a/hbase-common/src/main/resources/hbase-default.xml
+++ b/hbase-common/src/main/resources/hbase-default.xml
@@ -1378,7 +1378,7 @@ possible configurations would overwhelm and obscure the 
important.
   
   
 dfs.client.read.shortcircuit
-false
+
 
   If set to true, this configuration parameter enables short-circuit local
   reads.
@@ -1386,7 +1386,7 @@ possible configurations would overwhelm and obscure the 
important.
   
   
 dfs.domain.socket.path
-none
+
 
   This is a path to a UNIX domain socket that will be used for
   communication between the DataNode and local HDFS clients, if
diff --git 
a/hbase-common/src/test/java/org/apache/hadoop/hbase/TestHBaseConfiguration.java
 
b/hbase-common/src/test/java/org/apache/hadoop/hbase/TestHBaseConfiguration.java
index 6a0b428..ffa94ba 100644
--- 
a/hbase-common/src/test/java/org/apache/hadoop/hbase/TestHBaseConfiguration.java
+++ 
b/hbase-common/src/test/java/org/apache/hadoop/hbase/TestHBaseConfiguration.java
@@ -116,6 +116,23 @@ public class TestHBaseConfiguration {
 Assert.assertTrue(User.isHBaseSecurityEnabled(conf));
   }
 
+  @Test
+  public void testGetConfigOfShortcircuitRead() throws Exception {
+Configuration conf = HBaseConfiguration.create();
+Configuration.addDefaultResource("hdfs-scr-disabled.xml");
+assertEquals("hdfs-scr-disabled.xml",
+conf.getPropertySources("dfs.client.read.shortcircuit")[0]);
+assertEquals("false", conf.get("dfs.client.read.shortcircuit"));
+assertNull(conf.get("dfs.domain.socket.path"));
+Configuration.addDefaultResource("hdfs-scr-enabled.xml");
+assertEquals("hdfs-scr-enabled.xml",
+conf.getPropertySources("dfs.client.read.shortcircuit")[0]);
+assertEquals("hdfs-scr-enabled.xml",
+conf.getPropertySources("dfs.domain.socket.path")[0]);
+assertEquals("true", conf.get("dfs.client.read.shortcircuit"));
+assertEquals("/var/lib/hadoop-hdfs/dn_socket", 
conf.get("dfs.domain.socket.path"));
+  }
+
   private static class ReflectiveCredentialProviderClient {
 public static final String HADOOP_CRED_PROVIDER_FACTORY_CLASS_NAME =
 "org.apache.hadoop.security.alias.JavaKeyStoreProvider$Factory";
diff --git a/hbase-common/src/test/resources/hdfs-scr-disabled.xml 
b/hbase-common/src/test/resources/hdfs-scr-disabled.xml
new file mode 100644
index 000..fdb3c36
--- /dev/null
+++ b/hbase-common/src/test/resources/hdfs-scr-disabled.xml
@@ -0,0 +1,42 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!-- Apache license header elided by the archive -->
+<configuration>
+  <property>
+    <name>dfs.client.read.shortcircuit</name>
+    <value>false</value>
+    <description>
+      If set to true, this configuration parameter enables short-circuit local
+      reads.
+    </description>
+  </property>
+  <property>
+    <name>dfs.domain.socket.path</name>
+    <value></value>
+    <description>
+      Optional.  This is a path to a UNIX domain socket that will be used for
+      communication between the DataNode and local HDFS clients.
+      If the string "_PORT" is present in this path, it will be replaced by the
+      TCP port of the DataNode.
+    </description>
+  </property>
+</configuration>
diff --git a/hbase-common/src/test/resources/hdfs-scr-enabled.xml 
b/hbase-common/src/test/resources/hdfs-scr-enabled.xml
new file mode 100644
index 000..8594494
--- /dev/null
+++ b/hbase-common/src/test/resources/hdfs-scr-enabled.xml
@@ -0,0 +1,42 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!-- Apache license header elided by the archive -->
+<configuration>
+  <property>
+    <name>dfs.client.read.shortcircuit</name>
+    <value>true</value>
+    <description>
+      If set t
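
For context on the rename above: Hadoop's Configuration treats any classpath
resource registered through addDefaultResource() as a default for every
Configuration instance in the JVM, so a test resource named hdfs-default.xml
shadows the real HDFS defaults and leaks into MiniDFS-based tests. A minimal
sketch of the lookup behavior the new test asserts (file and property names
are taken from the commit; the class and main() are illustrative only):

import org.apache.hadoop.conf.Configuration;

public class DefaultResourceSketch {
  public static void main(String[] args) {
    // Registered resources become defaults for all Configuration objects;
    // when two resources define the same key, the later registration wins.
    Configuration.addDefaultResource("hdfs-scr-disabled.xml");
    Configuration.addDefaultResource("hdfs-scr-enabled.xml");

    Configuration conf = new Configuration();
    // Prints "true": hdfs-scr-enabled.xml was added last, so it wins.
    System.out.println(conf.get("dfs.client.read.shortcircuit"));
    // The first entry names the resource that supplied the winning value.
    System.out.println(conf.getPropertySources("dfs.client.read.shortcircuit")[0]);
  }
}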

[hbase] branch master updated: HBASE-25449 'dfs.client.read.shortcircuit' should not be set in hbase-default.xml

2021-01-08 Thread elserj
This is an automated email from the ASF dual-hosted git repository.

elserj pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/master by this push:
 new 49aba57  HBASE-25449 'dfs.client.read.shortcircuit' should not be set 
in hbase-default.xml
49aba57 is described below

commit 49aba571813f649a2ff4482a2209ee9910cc72c3
Author: 申胜利 <48829688+shenshen...@users.noreply.github.com>
AuthorDate: Fri Jan 8 14:10:30 2021 -0500

HBASE-25449 'dfs.client.read.shortcircuit' should not be set in 
hbase-default.xml

Signed-off-by: Josh Elser 
---
 hbase-common/src/main/resources/hbase-default.xml  |  4 +--
 .../hadoop/hbase/TestHBaseConfiguration.java   | 17 +
 hbase-common/src/test/resources/hdfs-default.xml   | 42 ++
 .../src/test/resources/hdfs-scr-enabled.xml| 42 ++
 4 files changed, 103 insertions(+), 2 deletions(-)

diff --git a/hbase-common/src/main/resources/hbase-default.xml 
b/hbase-common/src/main/resources/hbase-default.xml
index 9092dd1..20f3881 100644
--- a/hbase-common/src/main/resources/hbase-default.xml
+++ b/hbase-common/src/main/resources/hbase-default.xml
@@ -1461,7 +1461,7 @@ possible configurations would overwhelm and obscure the important.
   </property>
   <property>
     <name>dfs.client.read.shortcircuit</name>
-    <value>false</value>
+    <value></value>
     <description>
       If set to true, this configuration parameter enables short-circuit local
       reads.
@@ -1469,7 +1469,7 @@ possible configurations would overwhelm and obscure the important.
   </property>
   <property>
     <name>dfs.domain.socket.path</name>
-    <value>none</value>
+    <value></value>
     <description>
       This is a path to a UNIX domain socket that will be used for
       communication between the DataNode and local HDFS clients, if
diff --git 
a/hbase-common/src/test/java/org/apache/hadoop/hbase/TestHBaseConfiguration.java
 
b/hbase-common/src/test/java/org/apache/hadoop/hbase/TestHBaseConfiguration.java
index 6a0b428..1144f1d 100644
--- 
a/hbase-common/src/test/java/org/apache/hadoop/hbase/TestHBaseConfiguration.java
+++ 
b/hbase-common/src/test/java/org/apache/hadoop/hbase/TestHBaseConfiguration.java
@@ -115,6 +115,23 @@ public class TestHBaseConfiguration {
 conf.set("hbase.security.authentication", "KERBeros");
 Assert.assertTrue(User.isHBaseSecurityEnabled(conf));
   }
+  
+  @Test
+  public void testGetConfigOfShortcircuitRead() throws Exception {
+Configuration conf = HBaseConfiguration.create();
+Configuration.addDefaultResource("hdfs-default.xml");
+assertEquals("hdfs-default.xml",
+conf.getPropertySources("dfs.client.read.shortcircuit")[0]);
+assertEquals("false", conf.get("dfs.client.read.shortcircuit"));
+assertNull(conf.get("dfs.domain.socket.path"));
+Configuration.addDefaultResource("hdfs-scr-enabled.xml");
+assertEquals("hdfs-scr-enabled.xml",
+conf.getPropertySources("dfs.client.read.shortcircuit")[0]);
+assertEquals("hdfs-scr-enabled.xml",
+conf.getPropertySources("dfs.domain.socket.path")[0]);
+assertEquals("true", conf.get("dfs.client.read.shortcircuit"));
+assertEquals("/var/lib/hadoop-hdfs/dn_socket", 
conf.get("dfs.domain.socket.path"));
+  }
 
   private static class ReflectiveCredentialProviderClient {
 public static final String HADOOP_CRED_PROVIDER_FACTORY_CLASS_NAME =
diff --git a/hbase-common/src/test/resources/hdfs-default.xml 
b/hbase-common/src/test/resources/hdfs-default.xml
new file mode 100644
index 000..fdb3c36
--- /dev/null
+++ b/hbase-common/src/test/resources/hdfs-default.xml
@@ -0,0 +1,42 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!-- Apache license header elided by the archive -->
+<configuration>
+  <property>
+    <name>dfs.client.read.shortcircuit</name>
+    <value>false</value>
+    <description>
+      If set to true, this configuration parameter enables short-circuit local
+      reads.
+    </description>
+  </property>
+  <property>
+    <name>dfs.domain.socket.path</name>
+    <value></value>
+    <description>
+      Optional.  This is a path to a UNIX domain socket that will be used for
+      communication between the DataNode and local HDFS clients.
+      If the string "_PORT" is present in this path, it will be replaced by the
+      TCP port of the DataNode.
+    </description>
+  </property>
+</configuration>
diff --git a/hbase-common/src/test/resources/hdfs-scr-enabled.xml 
b/hbase-common/src/test/resources/hdfs-scr-enabled.xml
new file mode 100644
index 000..8594494
--- /dev/null
+++ b/hbase-common/src/test/resources/hdfs-scr-enabled.xml
@@ -0,0 +1,42 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!-- Apache license header elided by the archive -->
+<configuration>
+  <property>
+    <name>dfs.client.read.shortcircuit</name>
+    <value>true</value>
+    <description>
+      If set to true, this configuration parameter enables short-circuit local
+      reads.
+    </description>
+  </property>
+  <property>
+    <name>dfs.domain.socket.path</name>
+    <value>/var/lib/hadoop-hdfs/dn_socket</value>
+    <description>
+      Optional.  This is a path to a UNIX domain socket that will be used for
+      communication between the DataNode and local HDFS clients.
+      If the string "_PORT" is present in this path, it will be replaced by the
+      TCP port of the DataNode.
+    </description>
+  </property>
+</configuration>



[hbase] branch branch-2.4 updated: HBASE-25449 'dfs.client.read.shortcircuit' should not be set in hbase-default.xml

2021-01-08 Thread elserj
This is an automated email from the ASF dual-hosted git repository.

elserj pushed a commit to branch branch-2.4
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2.4 by this push:
 new 09db88d  HBASE-25449 'dfs.client.read.shortcircuit' should not be set 
in hbase-default.xml
09db88d is described below

commit 09db88d3ba93962c5c97a4947ffe82c2d54fffe7
Author: 申胜利 <48829688+shenshen...@users.noreply.github.com>
AuthorDate: Fri Jan 8 14:10:30 2021 -0500

HBASE-25449 'dfs.client.read.shortcircuit' should not be set in 
hbase-default.xml

Signed-off-by: Josh Elser 
---
 hbase-common/src/main/resources/hbase-default.xml  |  4 +--
 .../hadoop/hbase/TestHBaseConfiguration.java   | 17 +
 hbase-common/src/test/resources/hdfs-default.xml   | 42 ++
 .../src/test/resources/hdfs-scr-enabled.xml| 42 ++
 4 files changed, 103 insertions(+), 2 deletions(-)

diff --git a/hbase-common/src/main/resources/hbase-default.xml 
b/hbase-common/src/main/resources/hbase-default.xml
index a1827a2..df94c5b 100644
--- a/hbase-common/src/main/resources/hbase-default.xml
+++ b/hbase-common/src/main/resources/hbase-default.xml
@@ -1442,7 +1442,7 @@ possible configurations would overwhelm and obscure the important.
   </property>
   <property>
     <name>dfs.client.read.shortcircuit</name>
-    <value>false</value>
+    <value></value>
     <description>
       If set to true, this configuration parameter enables short-circuit local
       reads.
@@ -1450,7 +1450,7 @@ possible configurations would overwhelm and obscure the important.
   </property>
   <property>
     <name>dfs.domain.socket.path</name>
-    <value>none</value>
+    <value></value>
     <description>
       This is a path to a UNIX domain socket that will be used for
       communication between the DataNode and local HDFS clients, if
diff --git 
a/hbase-common/src/test/java/org/apache/hadoop/hbase/TestHBaseConfiguration.java
 
b/hbase-common/src/test/java/org/apache/hadoop/hbase/TestHBaseConfiguration.java
index 6a0b428..1144f1d 100644
--- 
a/hbase-common/src/test/java/org/apache/hadoop/hbase/TestHBaseConfiguration.java
+++ 
b/hbase-common/src/test/java/org/apache/hadoop/hbase/TestHBaseConfiguration.java
@@ -115,6 +115,23 @@ public class TestHBaseConfiguration {
 conf.set("hbase.security.authentication", "KERBeros");
 Assert.assertTrue(User.isHBaseSecurityEnabled(conf));
   }
+  
+  @Test
+  public void testGetConfigOfShortcircuitRead() throws Exception {
+Configuration conf = HBaseConfiguration.create();
+Configuration.addDefaultResource("hdfs-default.xml");
+assertEquals("hdfs-default.xml",
+conf.getPropertySources("dfs.client.read.shortcircuit")[0]);
+assertEquals("false", conf.get("dfs.client.read.shortcircuit"));
+assertNull(conf.get("dfs.domain.socket.path"));
+Configuration.addDefaultResource("hdfs-scr-enabled.xml");
+assertEquals("hdfs-scr-enabled.xml",
+conf.getPropertySources("dfs.client.read.shortcircuit")[0]);
+assertEquals("hdfs-scr-enabled.xml",
+conf.getPropertySources("dfs.domain.socket.path")[0]);
+assertEquals("true", conf.get("dfs.client.read.shortcircuit"));
+assertEquals("/var/lib/hadoop-hdfs/dn_socket", 
conf.get("dfs.domain.socket.path"));
+  }
 
   private static class ReflectiveCredentialProviderClient {
 public static final String HADOOP_CRED_PROVIDER_FACTORY_CLASS_NAME =
diff --git a/hbase-common/src/test/resources/hdfs-default.xml 
b/hbase-common/src/test/resources/hdfs-default.xml
new file mode 100644
index 000..fdb3c36
--- /dev/null
+++ b/hbase-common/src/test/resources/hdfs-default.xml
@@ -0,0 +1,42 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!-- Apache license header elided by the archive -->
+<configuration>
+  <property>
+    <name>dfs.client.read.shortcircuit</name>
+    <value>false</value>
+    <description>
+      If set to true, this configuration parameter enables short-circuit local
+      reads.
+    </description>
+  </property>
+  <property>
+    <name>dfs.domain.socket.path</name>
+    <value></value>
+    <description>
+      Optional.  This is a path to a UNIX domain socket that will be used for
+      communication between the DataNode and local HDFS clients.
+      If the string "_PORT" is present in this path, it will be replaced by the
+      TCP port of the DataNode.
+    </description>
+  </property>
+</configuration>
diff --git a/hbase-common/src/test/resources/hdfs-scr-enabled.xml 
b/hbase-common/src/test/resources/hdfs-scr-enabled.xml
new file mode 100644
index 000..8594494
--- /dev/null
+++ b/hbase-common/src/test/resources/hdfs-scr-enabled.xml
@@ -0,0 +1,42 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!-- Apache license header elided by the archive -->
+<configuration>
+  <property>
+    <name>dfs.client.read.shortcircuit</name>
+    <value>true</value>
+    <description>
+      If set to true, this configuration parameter enables short-circuit local
+      reads.
+    </description>
+  </property>
+  <property>
+    <name>dfs.domain.socket.path</name>
+    <value>/var/lib/hadoop-hdfs/dn_socket</value>
+    <description>
+      Optional.  This is a path to a UNIX domain socket that will be used for
+      communication between the DataNode and local HDFS clients.
+      If the string "_PORT" is present in this path, it will be replaced by the
+      TCP port of the DataNode.
+    </description>
+  </property>
+</configuration>



[hbase] branch branch-2.3 updated: HBASE-25449 'dfs.client.read.shortcircuit' should not be set in hbase-default.xml

2021-01-08 Thread elserj
This is an automated email from the ASF dual-hosted git repository.

elserj pushed a commit to branch branch-2.3
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2.3 by this push:
 new a17a264  HBASE-25449 'dfs.client.read.shortcircuit' should not be set 
in hbase-default.xml
a17a264 is described below

commit a17a264b375ba5a413127150297a7e7f0dab4281
Author: 申胜利 <48829688+shenshen...@users.noreply.github.com>
AuthorDate: Fri Jan 8 14:10:30 2021 -0500

HBASE-25449 'dfs.client.read.shortcircuit' should not be set in 
hbase-default.xml

Signed-off-by: Josh Elser 
---
 hbase-common/src/main/resources/hbase-default.xml  |  4 +--
 .../hadoop/hbase/TestHBaseConfiguration.java   | 17 +
 hbase-common/src/test/resources/hdfs-default.xml   | 42 ++
 .../src/test/resources/hdfs-scr-enabled.xml| 42 ++
 4 files changed, 103 insertions(+), 2 deletions(-)

diff --git a/hbase-common/src/main/resources/hbase-default.xml 
b/hbase-common/src/main/resources/hbase-default.xml
index 6f84eba..62cf647 100644
--- a/hbase-common/src/main/resources/hbase-default.xml
+++ b/hbase-common/src/main/resources/hbase-default.xml
@@ -1437,7 +1437,7 @@ possible configurations would overwhelm and obscure the important.
   </property>
   <property>
     <name>dfs.client.read.shortcircuit</name>
-    <value>false</value>
+    <value></value>
     <description>
       If set to true, this configuration parameter enables short-circuit local
       reads.
@@ -1445,7 +1445,7 @@ possible configurations would overwhelm and obscure the important.
   </property>
   <property>
     <name>dfs.domain.socket.path</name>
-    <value>none</value>
+    <value></value>
     <description>
       This is a path to a UNIX domain socket that will be used for
       communication between the DataNode and local HDFS clients, if
diff --git 
a/hbase-common/src/test/java/org/apache/hadoop/hbase/TestHBaseConfiguration.java
 
b/hbase-common/src/test/java/org/apache/hadoop/hbase/TestHBaseConfiguration.java
index 6a0b428..1144f1d 100644
--- 
a/hbase-common/src/test/java/org/apache/hadoop/hbase/TestHBaseConfiguration.java
+++ 
b/hbase-common/src/test/java/org/apache/hadoop/hbase/TestHBaseConfiguration.java
@@ -115,6 +115,23 @@ public class TestHBaseConfiguration {
 conf.set("hbase.security.authentication", "KERBeros");
 Assert.assertTrue(User.isHBaseSecurityEnabled(conf));
   }
+  
+  @Test
+  public void testGetConfigOfShortcircuitRead() throws Exception {
+Configuration conf = HBaseConfiguration.create();
+Configuration.addDefaultResource("hdfs-default.xml");
+assertEquals("hdfs-default.xml",
+conf.getPropertySources("dfs.client.read.shortcircuit")[0]);
+assertEquals("false", conf.get("dfs.client.read.shortcircuit"));
+assertNull(conf.get("dfs.domain.socket.path"));
+Configuration.addDefaultResource("hdfs-scr-enabled.xml");
+assertEquals("hdfs-scr-enabled.xml",
+conf.getPropertySources("dfs.client.read.shortcircuit")[0]);
+assertEquals("hdfs-scr-enabled.xml",
+conf.getPropertySources("dfs.domain.socket.path")[0]);
+assertEquals("true", conf.get("dfs.client.read.shortcircuit"));
+assertEquals("/var/lib/hadoop-hdfs/dn_socket", 
conf.get("dfs.domain.socket.path"));
+  }
 
   private static class ReflectiveCredentialProviderClient {
 public static final String HADOOP_CRED_PROVIDER_FACTORY_CLASS_NAME =
diff --git a/hbase-common/src/test/resources/hdfs-default.xml 
b/hbase-common/src/test/resources/hdfs-default.xml
new file mode 100644
index 000..fdb3c36
--- /dev/null
+++ b/hbase-common/src/test/resources/hdfs-default.xml
@@ -0,0 +1,42 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!-- Apache license header elided by the archive -->
+<configuration>
+  <property>
+    <name>dfs.client.read.shortcircuit</name>
+    <value>false</value>
+    <description>
+      If set to true, this configuration parameter enables short-circuit local
+      reads.
+    </description>
+  </property>
+  <property>
+    <name>dfs.domain.socket.path</name>
+    <value></value>
+    <description>
+      Optional.  This is a path to a UNIX domain socket that will be used for
+      communication between the DataNode and local HDFS clients.
+      If the string "_PORT" is present in this path, it will be replaced by the
+      TCP port of the DataNode.
+    </description>
+  </property>
+</configuration>
diff --git a/hbase-common/src/test/resources/hdfs-scr-enabled.xml 
b/hbase-common/src/test/resources/hdfs-scr-enabled.xml
new file mode 100644
index 000..8594494
--- /dev/null
+++ b/hbase-common/src/test/resources/hdfs-scr-enabled.xml
@@ -0,0 +1,42 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!-- Apache license header elided by the archive -->
+<configuration>
+  <property>
+    <name>dfs.client.read.shortcircuit</name>
+    <value>true</value>
+    <description>
+      If set to true, this configuration parameter enables short-circuit local
+      reads.
+    </description>
+  </property>
+  <property>
+    <name>dfs.domain.socket.path</name>
+    <value>/var/lib/hadoop-hdfs/dn_socket</value>
+    <description>
+      Optional.  This is a path to a UNIX domain socket that will be used for
+      communication between the DataNode and local HDFS clients.
+      If the string "_PORT" is present in this path, it will be replaced by the
+      TCP port of the DataNode.
+    </description>
+  </property>
+</configuration>



[hbase] branch branch-2 updated: HBASE-25449 'dfs.client.read.shortcircuit' should not be set in hbase-default.xml

2021-01-08 Thread elserj
This is an automated email from the ASF dual-hosted git repository.

elserj pushed a commit to branch branch-2
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2 by this push:
 new 661bea9  HBASE-25449 'dfs.client.read.shortcircuit' should not be set 
in hbase-default.xml
661bea9 is described below

commit 661bea913581ff8a45a9c2525257e653b2b88b70
Author: 申胜利 <48829688+shenshen...@users.noreply.github.com>
AuthorDate: Fri Jan 8 14:10:30 2021 -0500

HBASE-25449 'dfs.client.read.shortcircuit' should not be set in 
hbase-default.xml

Signed-off-by: Josh Elser 
---
 hbase-common/src/main/resources/hbase-default.xml  |  4 +--
 .../hadoop/hbase/TestHBaseConfiguration.java   | 17 +
 hbase-common/src/test/resources/hdfs-default.xml   | 42 ++
 .../src/test/resources/hdfs-scr-enabled.xml| 42 ++
 4 files changed, 103 insertions(+), 2 deletions(-)

diff --git a/hbase-common/src/main/resources/hbase-default.xml 
b/hbase-common/src/main/resources/hbase-default.xml
index db1e3df..0908ca6 100644
--- a/hbase-common/src/main/resources/hbase-default.xml
+++ b/hbase-common/src/main/resources/hbase-default.xml
@@ -1440,7 +1440,7 @@ possible configurations would overwhelm and obscure the important.
   </property>
   <property>
     <name>dfs.client.read.shortcircuit</name>
-    <value>false</value>
+    <value></value>
     <description>
       If set to true, this configuration parameter enables short-circuit local
       reads.
@@ -1448,7 +1448,7 @@ possible configurations would overwhelm and obscure the important.
   </property>
   <property>
     <name>dfs.domain.socket.path</name>
-    <value>none</value>
+    <value></value>
     <description>
       This is a path to a UNIX domain socket that will be used for
       communication between the DataNode and local HDFS clients, if
diff --git 
a/hbase-common/src/test/java/org/apache/hadoop/hbase/TestHBaseConfiguration.java
 
b/hbase-common/src/test/java/org/apache/hadoop/hbase/TestHBaseConfiguration.java
index 6a0b428..1144f1d 100644
--- 
a/hbase-common/src/test/java/org/apache/hadoop/hbase/TestHBaseConfiguration.java
+++ 
b/hbase-common/src/test/java/org/apache/hadoop/hbase/TestHBaseConfiguration.java
@@ -115,6 +115,23 @@ public class TestHBaseConfiguration {
 conf.set("hbase.security.authentication", "KERBeros");
 Assert.assertTrue(User.isHBaseSecurityEnabled(conf));
   }
+  
+  @Test
+  public void testGetConfigOfShortcircuitRead() throws Exception {
+Configuration conf = HBaseConfiguration.create();
+Configuration.addDefaultResource("hdfs-default.xml");
+assertEquals("hdfs-default.xml",
+conf.getPropertySources("dfs.client.read.shortcircuit")[0]);
+assertEquals("false", conf.get("dfs.client.read.shortcircuit"));
+assertNull(conf.get("dfs.domain.socket.path"));
+Configuration.addDefaultResource("hdfs-scr-enabled.xml");
+assertEquals("hdfs-scr-enabled.xml",
+conf.getPropertySources("dfs.client.read.shortcircuit")[0]);
+assertEquals("hdfs-scr-enabled.xml",
+conf.getPropertySources("dfs.domain.socket.path")[0]);
+assertEquals("true", conf.get("dfs.client.read.shortcircuit"));
+assertEquals("/var/lib/hadoop-hdfs/dn_socket", 
conf.get("dfs.domain.socket.path"));
+  }
 
   private static class ReflectiveCredentialProviderClient {
 public static final String HADOOP_CRED_PROVIDER_FACTORY_CLASS_NAME =
diff --git a/hbase-common/src/test/resources/hdfs-default.xml 
b/hbase-common/src/test/resources/hdfs-default.xml
new file mode 100644
index 000..fdb3c36
--- /dev/null
+++ b/hbase-common/src/test/resources/hdfs-default.xml
@@ -0,0 +1,42 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!-- Apache license header elided by the archive -->
+<configuration>
+  <property>
+    <name>dfs.client.read.shortcircuit</name>
+    <value>false</value>
+    <description>
+      If set to true, this configuration parameter enables short-circuit local
+      reads.
+    </description>
+  </property>
+  <property>
+    <name>dfs.domain.socket.path</name>
+    <value></value>
+    <description>
+      Optional.  This is a path to a UNIX domain socket that will be used for
+      communication between the DataNode and local HDFS clients.
+      If the string "_PORT" is present in this path, it will be replaced by the
+      TCP port of the DataNode.
+    </description>
+  </property>
+</configuration>
diff --git a/hbase-common/src/test/resources/hdfs-scr-enabled.xml 
b/hbase-common/src/test/resources/hdfs-scr-enabled.xml
new file mode 100644
index 000..8594494
--- /dev/null
+++ b/hbase-common/src/test/resources/hdfs-scr-enabled.xml
@@ -0,0 +1,42 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!-- Apache license header elided by the archive -->
+<configuration>
+  <property>
+    <name>dfs.client.read.shortcircuit</name>
+    <value>true</value>
+    <description>
+      If set to true, this configuration parameter enables short-circuit local
+      reads.
+    </description>
+  </property>
+  <property>
+    <name>dfs.domain.socket.path</name>
+    <value>/var/lib/hadoop-hdfs/dn_socket</value>
+    <description>
+      Optional.  This is a path to a UNIX domain socket that will be used for
+      communication between the DataNode and local HDFS clients.
+      If the string "_PORT" is present in this path, it will be replaced by the
+      TCP port of the DataNode.
+    </description>
+  </property>
+</configuration>



[hbase] branch branch-2.2 updated: HBASE-25449 'dfs.client.read.shortcircuit' should not be set in hbase-default.xml

2021-01-08 Thread elserj
This is an automated email from the ASF dual-hosted git repository.

elserj pushed a commit to branch branch-2.2
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2.2 by this push:
 new 9e4e808  HBASE-25449 'dfs.client.read.shortcircuit' should not be set 
in hbase-default.xml
9e4e808 is described below

commit 9e4e808701af356df17a85b32e1945aea0eb0ef6
Author: 申胜利 <48829688+shenshen...@users.noreply.github.com>
AuthorDate: Fri Jan 8 14:10:30 2021 -0500

HBASE-25449 'dfs.client.read.shortcircuit' should not be set in 
hbase-default.xml

Signed-off-by: Josh Elser 
---
 hbase-common/src/main/resources/hbase-default.xml  |  4 +--
 .../hadoop/hbase/TestHBaseConfiguration.java   | 17 +
 hbase-common/src/test/resources/hdfs-default.xml   | 42 ++
 .../src/test/resources/hdfs-scr-enabled.xml| 42 ++
 4 files changed, 103 insertions(+), 2 deletions(-)

diff --git a/hbase-common/src/main/resources/hbase-default.xml 
b/hbase-common/src/main/resources/hbase-default.xml
index 367b24a..435f4b4 100644
--- a/hbase-common/src/main/resources/hbase-default.xml
+++ b/hbase-common/src/main/resources/hbase-default.xml
@@ -1378,7 +1378,7 @@ possible configurations would overwhelm and obscure the important.
   </property>
   <property>
     <name>dfs.client.read.shortcircuit</name>
-    <value>false</value>
+    <value></value>
     <description>
       If set to true, this configuration parameter enables short-circuit local
       reads.
@@ -1386,7 +1386,7 @@ possible configurations would overwhelm and obscure the important.
   </property>
   <property>
     <name>dfs.domain.socket.path</name>
-    <value>none</value>
+    <value></value>
     <description>
       This is a path to a UNIX domain socket that will be used for
       communication between the DataNode and local HDFS clients, if
diff --git 
a/hbase-common/src/test/java/org/apache/hadoop/hbase/TestHBaseConfiguration.java
 
b/hbase-common/src/test/java/org/apache/hadoop/hbase/TestHBaseConfiguration.java
index 6a0b428..1144f1d 100644
--- 
a/hbase-common/src/test/java/org/apache/hadoop/hbase/TestHBaseConfiguration.java
+++ 
b/hbase-common/src/test/java/org/apache/hadoop/hbase/TestHBaseConfiguration.java
@@ -115,6 +115,23 @@ public class TestHBaseConfiguration {
 conf.set("hbase.security.authentication", "KERBeros");
 Assert.assertTrue(User.isHBaseSecurityEnabled(conf));
   }
+  
+  @Test
+  public void testGetConfigOfShortcircuitRead() throws Exception {
+Configuration conf = HBaseConfiguration.create();
+Configuration.addDefaultResource("hdfs-default.xml");
+assertEquals("hdfs-default.xml",
+conf.getPropertySources("dfs.client.read.shortcircuit")[0]);
+assertEquals("false", conf.get("dfs.client.read.shortcircuit"));
+assertNull(conf.get("dfs.domain.socket.path"));
+Configuration.addDefaultResource("hdfs-scr-enabled.xml");
+assertEquals("hdfs-scr-enabled.xml",
+conf.getPropertySources("dfs.client.read.shortcircuit")[0]);
+assertEquals("hdfs-scr-enabled.xml",
+conf.getPropertySources("dfs.domain.socket.path")[0]);
+assertEquals("true", conf.get("dfs.client.read.shortcircuit"));
+assertEquals("/var/lib/hadoop-hdfs/dn_socket", 
conf.get("dfs.domain.socket.path"));
+  }
 
   private static class ReflectiveCredentialProviderClient {
 public static final String HADOOP_CRED_PROVIDER_FACTORY_CLASS_NAME =
diff --git a/hbase-common/src/test/resources/hdfs-default.xml 
b/hbase-common/src/test/resources/hdfs-default.xml
new file mode 100644
index 000..fdb3c36
--- /dev/null
+++ b/hbase-common/src/test/resources/hdfs-default.xml
@@ -0,0 +1,42 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!-- Apache license header elided by the archive -->
+<configuration>
+  <property>
+    <name>dfs.client.read.shortcircuit</name>
+    <value>false</value>
+    <description>
+      If set to true, this configuration parameter enables short-circuit local
+      reads.
+    </description>
+  </property>
+  <property>
+    <name>dfs.domain.socket.path</name>
+    <value></value>
+    <description>
+      Optional.  This is a path to a UNIX domain socket that will be used for
+      communication between the DataNode and local HDFS clients.
+      If the string "_PORT" is present in this path, it will be replaced by the
+      TCP port of the DataNode.
+    </description>
+  </property>
+</configuration>
diff --git a/hbase-common/src/test/resources/hdfs-scr-enabled.xml 
b/hbase-common/src/test/resources/hdfs-scr-enabled.xml
new file mode 100644
index 000..8594494
--- /dev/null
+++ b/hbase-common/src/test/resources/hdfs-scr-enabled.xml
@@ -0,0 +1,42 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!-- Apache license header elided by the archive -->
+<configuration>
+  <property>
+    <name>dfs.client.read.shortcircuit</name>
+    <value>true</value>
+    <description>
+      If set to true, this configuration parameter enables short-circuit local
+      reads.
+    </description>
+  </property>
+  <property>
+    <name>dfs.domain.socket.path</name>
+    <value>/var/lib/hadoop-hdfs/dn_socket</value>
+    <description>
+      Optional.  This is a path to a UNIX domain socket that will be used for
+      communication between the DataNode and local HDFS clients.
+      If the string "_PORT" is present in this path, it will be replaced by the
+      TCP port of the DataNode.
+    </description>
+  </property>
+</configuration>



[hbase] branch master updated: HBASE-25279 Make ZKWatcher ExecutorService launch daemon threads

2020-12-31 Thread elserj
This is an automated email from the ASF dual-hosted git repository.

elserj pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/master by this push:
 new ec63cc3  HBASE-25279 Make ZKWatcher ExecutorService launch daemon 
threads
ec63cc3 is described below

commit ec63cc3144d923e83e9fcd7a35d54ec80d4782aa
Author: Josh Elser 
AuthorDate: Thu Nov 12 16:52:58 2020 -0500

HBASE-25279 Make ZKWatcher ExecutorService launch daemon threads

Closes #2651

Signed-off-by: Duo Zhang 
Signed-off-by: Yulin Niu 
---
 .../src/main/java/org/apache/hadoop/hbase/zookeeper/ZKWatcher.java  | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git 
a/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKWatcher.java
 
b/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKWatcher.java
index 136134d..7a9fdd6 100644
--- 
a/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKWatcher.java
+++ 
b/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKWatcher.java
@@ -97,7 +97,7 @@ public class ZKWatcher implements Watcher, Abortable, 
Closeable {
   // It is ok to do it in a single thread because the Zookeeper ClientCnxn 
already serializes the
   // requests using a single while loop and hence there is no performance 
degradation.
   private final ExecutorService zkEventProcessor = 
Executors.newSingleThreadExecutor(
-new ThreadFactoryBuilder().setNameFormat("zk-event-processor-pool-%d")
+new 
ThreadFactoryBuilder().setNameFormat("zk-event-processor-pool-%d").setDaemon(true)
   .setUncaughtExceptionHandler(Threads.LOGGING_EXCEPTION_HANDLER).build());
 
   private final Configuration conf;
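
Why the one-liner matters: a single-thread executor built without a daemon
thread factory keeps a user thread alive after the server's main threads
exit, so the JVM can hang on shutdown. A self-contained sketch of the pattern
the commit applies (plain java.util.concurrent, no HBase classes; all names
are illustrative):

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.ThreadFactory;

public class DaemonExecutorSketch {
  public static void main(String[] args) {
    ThreadFactory factory = runnable -> {
      Thread t = new Thread(runnable, "zk-event-processor-sketch");
      t.setDaemon(true); // the flag HBASE-25279 adds via ThreadFactoryBuilder
      return t;
    };
    ExecutorService pool = Executors.newSingleThreadExecutor(factory);
    pool.execute(() -> System.out.println("processed ZK event"));
    // main() can now return without calling pool.shutdown(); daemon threads
    // are abandoned at JVM exit (so the task may not run if exit comes first).
  }
}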



[hbase] branch branch-2 updated: HBASE-25279 Make ZKWatcher ExecutorService launch daemon threads

2020-12-31 Thread elserj
This is an automated email from the ASF dual-hosted git repository.

elserj pushed a commit to branch branch-2
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2 by this push:
 new 46ea169  HBASE-25279 Make ZKWatcher ExecutorService launch daemon 
threads
46ea169 is described below

commit 46ea169ea7f69b8fc8a05851ea3921494536a5ab
Author: Josh Elser 
AuthorDate: Thu Nov 12 16:52:58 2020 -0500

HBASE-25279 Make ZKWatcher ExecutorService launch daemon threads

Closes #2651

Signed-off-by: Duo Zhang 
Signed-off-by: Yulin Niu 
---
 .../src/main/java/org/apache/hadoop/hbase/zookeeper/ZKWatcher.java  | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git 
a/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKWatcher.java
 
b/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKWatcher.java
index 136134d..7a9fdd6 100644
--- 
a/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKWatcher.java
+++ 
b/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKWatcher.java
@@ -97,7 +97,7 @@ public class ZKWatcher implements Watcher, Abortable, 
Closeable {
   // It is ok to do it in a single thread because the Zookeeper ClientCnxn 
already serializes the
   // requests using a single while loop and hence there is no performance 
degradation.
   private final ExecutorService zkEventProcessor = 
Executors.newSingleThreadExecutor(
-new ThreadFactoryBuilder().setNameFormat("zk-event-processor-pool-%d")
+new 
ThreadFactoryBuilder().setNameFormat("zk-event-processor-pool-%d").setDaemon(true)
   .setUncaughtExceptionHandler(Threads.LOGGING_EXCEPTION_HANDLER).build());
 
   private final Configuration conf;



[hbase] branch master updated: HBASE-24268 REST and Thrift server do not handle the "doAs" parameter case insensitively

2020-11-24 Thread elserj
This is an automated email from the ASF dual-hosted git repository.

elserj pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/master by this push:
 new fc33137  HBASE-24268 REST and Thrift server do not handle the "doAs" 
parameter case insensitively
fc33137 is described below

commit fc3313771ddbd83d68a47a649d4265d9a2941de9
Author: Richard Antal 
AuthorDate: Wed Nov 18 11:19:42 2020 +0100

HBASE-24268 REST and Thrift server do not handle the "doAs" parameter case 
insensitively

Closes #1843

Signed-off-by: Josh Elser 
Signed-off-by: Sean Busbey 
---
 .../hbase/http/ProxyUserAuthenticationFilter.java  | 19 ++
 .../hadoop/hbase/rest/RESTServletContainer.java|  5 +-
 .../hadoop/hbase/rest/TestSecureRESTServer.java| 72 +-
 .../hadoop/hbase/thrift/ThriftHttpServlet.java |  3 +-
 4 files changed, 80 insertions(+), 19 deletions(-)

diff --git 
a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/ProxyUserAuthenticationFilter.java
 
b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/ProxyUserAuthenticationFilter.java
index 5fb17c9..182a4e1 100644
--- 
a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/ProxyUserAuthenticationFilter.java
+++ 
b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/ProxyUserAuthenticationFilter.java
@@ -150,6 +150,25 @@ public class ProxyUserAuthenticationFilter extends 
AuthenticationFilter {
 return false;
   }
 
+  /**
+   * The purpose of this function is to get the doAs parameter of a http 
request
+   * case insensitively
+   * @param request
+   * @return doAs parameter if exists or null otherwise
+   */
+  public static String getDoasFromHeader(final  HttpServletRequest request) {
+String doas = null;
+final Enumeration<String> headers = request.getHeaderNames();
+while (headers.hasMoreElements()){
+  String header = headers.nextElement();
+  if (header.toLowerCase().equals("doas")){
+doas = request.getHeader(header);
+break;
+  }
+}
+return doas;
+  }
+
   public static HttpServletRequest toLowerCase(
   final HttpServletRequest request) {
 @SuppressWarnings("unchecked")
diff --git 
a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServletContainer.java
 
b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServletContainer.java
index 1cae45c..28cf4cb 100644
--- 
a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServletContainer.java
+++ 
b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServletContainer.java
@@ -22,6 +22,7 @@ import java.io.IOException;
 import javax.servlet.ServletException;
 import javax.servlet.http.HttpServletRequest;
 import javax.servlet.http.HttpServletResponse;
+
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.authorize.AuthorizationException;
@@ -30,6 +31,7 @@ import org.apache.yetus.audience.InterfaceAudience;
 
 import org.apache.hbase.thirdparty.org.glassfish.jersey.server.ResourceConfig;
 import 
org.apache.hbase.thirdparty.org.glassfish.jersey.servlet.ServletContainer;
+import static 
org.apache.hadoop.hbase.http.ProxyUserAuthenticationFilter.toLowerCase;
 
 /**
  * REST servlet container. It is used to get the remote request user
@@ -51,7 +53,8 @@ public class RESTServletContainer extends ServletContainer {
   @Override
   public void service(final HttpServletRequest request,
   final HttpServletResponse response) throws ServletException, IOException 
{
-final String doAsUserFromQuery = request.getParameter("doAs");
+final HttpServletRequest lowerCaseRequest = toLowerCase(request);
+final String doAsUserFromQuery = lowerCaseRequest.getParameter("doas");
 RESTServlet servlet = RESTServlet.getInstance();
 if (doAsUserFromQuery != null) {
   Configuration conf = servlet.getConfiguration();
diff --git 
a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestSecureRESTServer.java
 
b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestSecureRESTServer.java
index 920cf45..47ef053 100644
--- 
a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestSecureRESTServer.java
+++ 
b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestSecureRESTServer.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hbase.rest;
 
+import static 
org.apache.hadoop.hbase.rest.RESTServlet.HBASE_REST_SUPPORT_PROXYUSER;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 
@@ -24,6 +25,7 @@ import com.fasterxml.jackson.databind.ObjectMapper;
 import com.fasterxml.jackson.jaxrs.json.JacksonJaxbJsonProvider;
 
 import java.io.File;
+import java.io.IOException;
 import java.net.HttpURLConnection;
 import java.net.URL;
 import java.security.Principal;
@@ -115,6 +117,7 @@ public c
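
The heart of the change is the case-insensitive header lookup in
getDoasFromHeader(). A stand-alone sketch of the same logic, with a plain Map
in place of HttpServletRequest so it runs without a servlet container; it
uses equalsIgnoreCase where the commit lowercases and compares, and all names
here are illustrative:

import java.util.Collections;
import java.util.Enumeration;
import java.util.Map;

public class DoAsLookupSketch {
  // Scan every header name and compare case-insensitively, so "doAs",
  // "DOAS" and "doas" all select the same header.
  static String getDoas(Map<String, String> headers) {
    Enumeration<String> names = Collections.enumeration(headers.keySet());
    while (names.hasMoreElements()) {
      String name = names.nextElement();
      if (name.equalsIgnoreCase("doas")) {
        return headers.get(name);
      }
    }
    return null; // header absent
  }

  public static void main(String[] args) {
    Map<String, String> headers = Map.of("Content-Type", "text/plain", "DoAs", "alice");
    System.out.println(getDoas(headers)); // prints "alice"
  }
}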

[hbase] branch branch-2 updated: HBASE-24268 REST and Thrift server do not handle the "doAs" parameter case insensitively

2020-11-24 Thread elserj
This is an automated email from the ASF dual-hosted git repository.

elserj pushed a commit to branch branch-2
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2 by this push:
 new 5ce3e3e  HBASE-24268 REST and Thrift server do not handle the "doAs" 
parameter case insensitively
5ce3e3e is described below

commit 5ce3e3e12c19182150a1693dd410be9c602b6fea
Author: Richard Antal 
AuthorDate: Wed Nov 18 11:19:42 2020 +0100

HBASE-24268 REST and Thrift server do not handle the "doAs" parameter case 
insensitively

Closes #1843

Signed-off-by: Josh Elser 
Signed-off-by: Sean Busbey 
---
 .../hbase/http/ProxyUserAuthenticationFilter.java  | 19 ++
 .../hadoop/hbase/rest/RESTServletContainer.java|  5 +-
 .../hadoop/hbase/rest/TestSecureRESTServer.java| 72 +-
 .../hadoop/hbase/thrift/ThriftHttpServlet.java |  3 +-
 4 files changed, 80 insertions(+), 19 deletions(-)

diff --git 
a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/ProxyUserAuthenticationFilter.java
 
b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/ProxyUserAuthenticationFilter.java
index 5fb17c9..182a4e1 100644
--- 
a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/ProxyUserAuthenticationFilter.java
+++ 
b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/ProxyUserAuthenticationFilter.java
@@ -150,6 +150,25 @@ public class ProxyUserAuthenticationFilter extends 
AuthenticationFilter {
 return false;
   }
 
+  /**
+   * The purpose of this function is to get the doAs parameter of a http 
request
+   * case insensitively
+   * @param request
+   * @return doAs parameter if exists or null otherwise
+   */
+  public static String getDoasFromHeader(final  HttpServletRequest request) {
+String doas = null;
+final Enumeration<String> headers = request.getHeaderNames();
+while (headers.hasMoreElements()){
+  String header = headers.nextElement();
+  if (header.toLowerCase().equals("doas")){
+doas = request.getHeader(header);
+break;
+  }
+}
+return doas;
+  }
+
   public static HttpServletRequest toLowerCase(
   final HttpServletRequest request) {
 @SuppressWarnings("unchecked")
diff --git 
a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServletContainer.java
 
b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServletContainer.java
index 1cae45c..28cf4cb 100644
--- 
a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServletContainer.java
+++ 
b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServletContainer.java
@@ -22,6 +22,7 @@ import java.io.IOException;
 import javax.servlet.ServletException;
 import javax.servlet.http.HttpServletRequest;
 import javax.servlet.http.HttpServletResponse;
+
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.authorize.AuthorizationException;
@@ -30,6 +31,7 @@ import org.apache.yetus.audience.InterfaceAudience;
 
 import org.apache.hbase.thirdparty.org.glassfish.jersey.server.ResourceConfig;
 import 
org.apache.hbase.thirdparty.org.glassfish.jersey.servlet.ServletContainer;
+import static 
org.apache.hadoop.hbase.http.ProxyUserAuthenticationFilter.toLowerCase;
 
 /**
  * REST servlet container. It is used to get the remote request user
@@ -51,7 +53,8 @@ public class RESTServletContainer extends ServletContainer {
   @Override
   public void service(final HttpServletRequest request,
   final HttpServletResponse response) throws ServletException, IOException 
{
-final String doAsUserFromQuery = request.getParameter("doAs");
+final HttpServletRequest lowerCaseRequest = toLowerCase(request);
+final String doAsUserFromQuery = lowerCaseRequest.getParameter("doas");
 RESTServlet servlet = RESTServlet.getInstance();
 if (doAsUserFromQuery != null) {
   Configuration conf = servlet.getConfiguration();
diff --git 
a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestSecureRESTServer.java
 
b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestSecureRESTServer.java
index 920cf45..47ef053 100644
--- 
a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestSecureRESTServer.java
+++ 
b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestSecureRESTServer.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hbase.rest;
 
+import static 
org.apache.hadoop.hbase.rest.RESTServlet.HBASE_REST_SUPPORT_PROXYUSER;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 
@@ -24,6 +25,7 @@ import com.fasterxml.jackson.databind.ObjectMapper;
 import com.fasterxml.jackson.jaxrs.json.JacksonJaxbJsonProvider;
 
 import java.io.File;
+import java.io.IOException;
 import java.net.HttpURLConnection;
 import java.net.URL;
 import java.security.Principal;
@@ -115,6 +117,7 @@ public c

[hbase] branch master updated: HBASE-25278 Add CACHE_BLOCKS option to count shell command

2020-11-24 Thread elserj
This is an automated email from the ASF dual-hosted git repository.

elserj pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/master by this push:
 new 1c09f24  HBASE-25278 Add CACHE_BLOCKS option to count shell command
1c09f24 is described below

commit 1c09f24b2d4991d8fcb4bd34889636bf8bda4ccf
Author: Josh Elser 
AuthorDate: Thu Nov 12 16:04:26 2020 -0500

HBASE-25278 Add CACHE_BLOCKS option to count shell command

Expose an argument on the `count` command which is passed to the
`setCacheBlocks` method on the Scan which the count command uses.

This is a quick and dirty approach to read all of the blocks for a table
into the block cache.

* Raise an error when the value isn't a boolean or the expected string

Closes #2650

Signed-off-by: Zach York 
Signed-off-by: Peter Somogyi 
---
 hbase-shell/src/main/ruby/hbase/table.rb  |  6 ++---
 hbase-shell/src/main/ruby/shell/commands/count.rb | 27 +--
 2 files changed, 28 insertions(+), 5 deletions(-)

diff --git a/hbase-shell/src/main/ruby/hbase/table.rb 
b/hbase-shell/src/main/ruby/hbase/table.rb
index 615cd58..9b216e0 100644
--- a/hbase-shell/src/main/ruby/hbase/table.rb
+++ b/hbase-shell/src/main/ruby/hbase/table.rb
@@ -303,18 +303,18 @@ EOF
 
 
#--
 # Count rows in a table
-def _count_internal(interval = 1000, scan = nil)
+def _count_internal(interval = 1000, scan = nil, cacheBlocks=false)
   raise(ArgumentError, 'Scan argument should be 
org.apache.hadoop.hbase.client.Scan') \
 unless scan.nil? || scan.is_a?(org.apache.hadoop.hbase.client.Scan)
   # We can safely set scanner caching with the first key only filter
 
   if scan.nil?
 scan = org.apache.hadoop.hbase.client.Scan.new
-scan.setCacheBlocks(false)
+scan.setCacheBlocks(cacheBlocks)
 scan.setCaching(10)
 scan.setFilter(org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter.new)
   else
-scan.setCacheBlocks(false)
+scan.setCacheBlocks(cacheBlocks)
 filter = scan.getFilter
 firstKeyOnlyFilter = 
org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter.new
 if filter.nil?
diff --git a/hbase-shell/src/main/ruby/shell/commands/count.rb 
b/hbase-shell/src/main/ruby/shell/commands/count.rb
index 03840d0..7052358 100644
--- a/hbase-shell/src/main/ruby/shell/commands/count.rb
+++ b/hbase-shell/src/main/ruby/shell/commands/count.rb
@@ -49,6 +49,17 @@ t to table 't1', the corresponding commands would be:
  hbase> t.count FILTER => "
 (QualifierFilter (>=, 'binary:xyz')) AND (TimestampsFilter ( 123, 456))"
  hbase> t.count COLUMNS => ['c1', 'c2'], STARTROW => 'abc', STOPROW => 'xyz'
+
+By default, this operation does not cause any new blocks to be read into
+the RegionServer block cache. This is typically the desired action; however,
+if you want to force all blocks for a table to be loaded into the block cache
+on-demand, you can pass the 'CACHE_BLOCKS' option with a value of 'true'. A 
value
+of 'false' is the default and will result in no blocks being cached. This
+command can be used in conjunction with all other options.
+
+hbase> count 'ns1:t1', CACHE_BLOCKS => true
+hbase> count 'ns1:t1', CACHE_BLOCKS => 'true'
+hbase> count 'ns1:t1', INTERVAL => 10, CACHE_BLOCKS => false
 EOF
   end
 
@@ -60,17 +71,29 @@ EOF
 # If the second parameter is an integer, then it is the old command 
syntax
 params = { 'INTERVAL' => params } if params.is_a?(Integer)
 
+# Try to be nice and convert a string to a bool
+if params.include?('CACHE_BLOCKS') and 
params['CACHE_BLOCKS'].is_a?(String)
+  if params['CACHE_BLOCKS'].downcase == 'true'
+params['CACHE_BLOCKS'] = true
+  elsif params['CACHE_BLOCKS'].downcase == 'false'
+params['CACHE_BLOCKS'] = false
+  else
+raise(ArgumentError, "Expected CACHE_BLOCKS value to be a boolean 
or the string 'true' or 'false'")
+  end
+end
+
 # Merge params with defaults
 params = {
   'INTERVAL' => 1000,
-  'CACHE' => 10
+  'CACHE' => 10,
+  'CACHE_BLOCKS' => false
 }.merge(params)
 
 scan = table._hash_to_scan(params)
 # Call the counter method
 @start_time = Time.now
 formatter.header
-count = table._count_internal(params['INTERVAL'].to_i, scan) do |cnt, 
row|
+count = table._count_internal(params['INTERVAL'].to_i, scan, 
params['CACHE_BLOCKS']) do |cnt, row|
   formatter.row(["Current count: #{cnt}, row: #{row}"])
 end
 formatter.footer(count)
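
In client terms, the shell's count builds a scan like the sketch below, and
CACHE_BLOCKS => true simply flips setCacheBlocks. A hedged Java equivalent
(connection setup and the "ns1:t1" table name are placeholders):

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter;

public class CountScanSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection();
         Table table = conn.getTable(TableName.valueOf("ns1:t1"))) {
      Scan scan = new Scan();
      scan.setCacheBlocks(true); // CACHE_BLOCKS => true: scanned blocks enter the block cache
      scan.setCaching(10); // rows per RPC, matching the shell's default
      scan.setFilter(new FirstKeyOnlyFilter()); // one cell per row is enough to count
      long count = 0;
      try (ResultScanner rs = table.getScanner(scan)) {
        for (Result ignored : rs) {
          count++;
        }
      }
      System.out.println("rows: " + count);
    }
  }
}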



[hbase] branch branch-2 updated: HBASE-25278 Add CACHE_BLOCKS option to count shell command

2020-11-24 Thread elserj
This is an automated email from the ASF dual-hosted git repository.

elserj pushed a commit to branch branch-2
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2 by this push:
 new 31917b0  HBASE-25278 Add CACHE_BLOCKS option to count shell command
31917b0 is described below

commit 31917b0a8ab1d7f90ab6997dcc80a38dcea98013
Author: Josh Elser 
AuthorDate: Thu Nov 12 16:04:26 2020 -0500

HBASE-25278 Add CACHE_BLOCKS option to count shell command

Expose an argument on the `count` command which is passed to the
`setCacheBlocks` method on the Scan which the count command uses.

This is a quick and dirty approach to read all of the blocks for a table
into the block cache.

* Raise an error when the value isn't a boolean or the expected string

Closes #2650

Signed-off-by: Zach York 
Signed-off-by: Peter Somogyi 
---
 hbase-shell/src/main/ruby/hbase/table.rb  |  6 ++---
 hbase-shell/src/main/ruby/shell/commands/count.rb | 27 +--
 2 files changed, 28 insertions(+), 5 deletions(-)

diff --git a/hbase-shell/src/main/ruby/hbase/table.rb 
b/hbase-shell/src/main/ruby/hbase/table.rb
index 4e8a186..d779261 100644
--- a/hbase-shell/src/main/ruby/hbase/table.rb
+++ b/hbase-shell/src/main/ruby/hbase/table.rb
@@ -303,18 +303,18 @@ EOF
 
 
#--
 # Count rows in a table
-def _count_internal(interval = 1000, scan = nil)
+def _count_internal(interval = 1000, scan = nil, cacheBlocks=false)
   raise(ArgumentError, 'Scan argument should be 
org.apache.hadoop.hbase.client.Scan') \
 unless scan.nil? || scan.is_a?(org.apache.hadoop.hbase.client.Scan)
   # We can safely set scanner caching with the first key only filter
 
   if scan.nil?
 scan = org.apache.hadoop.hbase.client.Scan.new
-scan.setCacheBlocks(false)
+scan.setCacheBlocks(cacheBlocks)
 scan.setCaching(10)
 scan.setFilter(org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter.new)
   else
-scan.setCacheBlocks(false)
+scan.setCacheBlocks(cacheBlocks)
 filter = scan.getFilter
 firstKeyOnlyFilter = 
org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter.new
 if filter.nil?
diff --git a/hbase-shell/src/main/ruby/shell/commands/count.rb 
b/hbase-shell/src/main/ruby/shell/commands/count.rb
index 03840d0..7052358 100644
--- a/hbase-shell/src/main/ruby/shell/commands/count.rb
+++ b/hbase-shell/src/main/ruby/shell/commands/count.rb
@@ -49,6 +49,17 @@ t to table 't1', the corresponding commands would be:
  hbase> t.count FILTER => "
 (QualifierFilter (>=, 'binary:xyz')) AND (TimestampsFilter ( 123, 456))"
  hbase> t.count COLUMNS => ['c1', 'c2'], STARTROW => 'abc', STOPROW => 'xyz'
+
+By default, this operation does not cause any new blocks to be read into
+the RegionServer block cache. This is typically the desired action; however,
+if you want to force all blocks for a table to be loaded into the block cache
+on-demand, you can pass the 'CACHE_BLOCKS' option with a value of 'true'. A 
value
+of 'false' is the default and will result in no blocks being cached. This
+command can be used in conjunction with all other options.
+
+hbase> count 'ns1:t1', CACHE_BLOCKS => true
+hbase> count 'ns1:t1', CACHE_BLOCKS => 'true'
+hbase> count 'ns1:t1', INTERVAL => 10, CACHE_BLOCKS => false
 EOF
   end
 
@@ -60,17 +71,29 @@ EOF
 # If the second parameter is an integer, then it is the old command 
syntax
 params = { 'INTERVAL' => params } if params.is_a?(Integer)
 
+# Try to be nice and convert a string to a bool
+if params.include?('CACHE_BLOCKS') and 
params['CACHE_BLOCKS'].is_a?(String)
+  if params['CACHE_BLOCKS'].downcase == 'true'
+params['CACHE_BLOCKS'] = true
+  elsif params['CACHE_BLOCKS'].downcase == 'false'
+params['CACHE_BLOCKS'] = false
+  else
+raise(ArgumentError, "Expected CACHE_BLOCKS value to be a boolean 
or the string 'true' or 'false'")
+  end
+end
+
 # Merge params with defaults
 params = {
   'INTERVAL' => 1000,
-  'CACHE' => 10
+  'CACHE' => 10,
+  'CACHE_BLOCKS' => false
 }.merge(params)
 
 scan = table._hash_to_scan(params)
 # Call the counter method
 @start_time = Time.now
 formatter.header
-count = table._count_internal(params['INTERVAL'].to_i, scan) do |cnt, 
row|
+count = table._count_internal(params['INTERVAL'].to_i, scan, 
params['CACHE_BLOCKS']) do |cnt, row|
   formatter.row(["Current count: #{cnt}, row: #{row}"])
 end
 formatter.footer(count)



[hbase-thirdparty] branch master updated: HBASE-24860 Bump copyright year

2020-08-11 Thread elserj
This is an automated email from the ASF dual-hosted git repository.

elserj pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hbase-thirdparty.git


The following commit(s) were added to refs/heads/master by this push:
 new 8bc00a9  HBASE-24860 Bump copyright year
8bc00a9 is described below

commit 8bc00a93679629a0e4c423e481198315473dc00b
Author: Josh Elser 
AuthorDate: Tue Aug 11 15:29:31 2020 -0400

HBASE-24860 Bump copyright year
---
 NOTICE.txt | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/NOTICE.txt b/NOTICE.txt
index 405f5a4..614e40a 100644
--- a/NOTICE.txt
+++ b/NOTICE.txt
@@ -1,2 +1,2 @@
 Apache HBase - Third-Party
-Copyright 2018 The Apache Software Foundation
+Copyright 2020 The Apache Software Foundation



[hbase] branch branch-2 updated: HBASE-24779 Report on the WAL edit buffer usage/limit for replication

2020-08-07 Thread elserj
This is an automated email from the ASF dual-hosted git repository.

elserj pushed a commit to branch branch-2
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2 by this push:
 new 303db63  HBASE-24779 Report on the WAL edit buffer usage/limit for 
replication
303db63 is described below

commit 303db63b7654db499923ac80c9be46e08a25b27b
Author: Josh Elser 
AuthorDate: Fri Aug 7 12:59:17 2020 -0400

HBASE-24779 Report on the WAL edit buffer usage/limit for replication

Closes #2193

Signed-off-by: Bharath Vissapragada 
Signed-off-by: Sean Busbey 
Signed-off-by: Wellington Chevreuil 
---
 ...a => MetricsReplicationGlobalSourceSource.java} | 24 
 .../MetricsReplicationSourceFactory.java   |  2 +-
 ... MetricsReplicationGlobalSourceSourceImpl.java} | 20 --
 .../MetricsReplicationSourceFactoryImpl.java   |  4 +-
 .../MetricsReplicationSourceSourceImpl.java|  2 +-
 .../replication/regionserver/MetricsSource.java| 19 -
 .../replication/regionserver/Replication.java  |  7 +++-
 .../regionserver/ReplicationSource.java|  4 +-
 .../regionserver/ReplicationSourceManager.java | 25 +++-
 .../regionserver/ReplicationSourceWALReader.java   | 11 --
 .../hbase/replication/TestReplicationEndpoint.java | 45 --
 .../regionserver/TestWALEntryStream.java   |  5 +++
 12 files changed, 142 insertions(+), 26 deletions(-)

diff --git 
a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSourceFactory.java
 
b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationGlobalSourceSource.java
similarity index 56%
copy from 
hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSourceFactory.java
copy to 
hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationGlobalSourceSource.java
index 2816f83..e373a6c 100644
--- 
a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSourceFactory.java
+++ 
b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationGlobalSourceSource.java
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -15,15 +15,25 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-
 package org.apache.hadoop.hbase.replication.regionserver;
 
 import org.apache.yetus.audience.InterfaceAudience;
 
 @InterfaceAudience.Private
-public interface MetricsReplicationSourceFactory {
-  public MetricsReplicationSinkSource getSink();
-  public MetricsReplicationSourceSource getSource(String id);
-  public MetricsReplicationTableSource getTableSource(String tableName);
-  public MetricsReplicationSourceSource getGlobalSource();
+public interface MetricsReplicationGlobalSourceSource extends 
MetricsReplicationSourceSource {
+
+  public static final String SOURCE_WAL_READER_EDITS_BUFFER = 
"source.walReaderEditsBufferUsage";
+
+  /**
+   * Sets the total usage of memory used by edits in memory read from WALs. 
The memory represented
+   * by this usage measure is across peers/sources. For example, we may batch 
the same WAL edits
+   * multiple times for the sake of replicating them to multiple peers..
+   * @param usage The memory used by edits in bytes
+   */
+  void setWALReaderEditsBufferBytes(long usage);
+
+  /**
+   * Returns the size, in bytes, of edits held in memory to be replicated 
across all peers.
+   */
+  long getWALReaderEditsBufferBytes();
 }
diff --git 
a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSourceFactory.java
 
b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSourceFactory.java
index 2816f83..5e4ad27 100644
--- 
a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSourceFactory.java
+++ 
b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSourceFactory.java
@@ -25,5 +25,5 @@ public interface MetricsReplicationSourceFactory {
   public MetricsReplicationSinkSource getSink();
   public MetricsReplicationSourceSource getSource(String id);
   public MetricsReplicationTableSource getTableSource(String tableName);
-  public MetricsReplicationSourceSource getGlobalSource();
+  public MetricsReplicationGlobalSourceSource getGlobalSource();
 }
diff --git 
a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationGlobalSourceSource.java
 
b/hbase-hadoo
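
The new interface boils down to one process-wide gauge. A stripped-down
stand-in for the contract (not the real MetricsReplicationGlobalSourceSourceImpl,
which registers a Hadoop metrics gauge; this only shows the intended usage):

import java.util.concurrent.atomic.AtomicLong;

public class WalEditsBufferGaugeSketch {
  // Backs the "source.walReaderEditsBufferUsage" reading with a plain counter.
  private final AtomicLong walReaderEditsBufferBytes = new AtomicLong();

  // Set with the running total of bytes held by batched WAL edits,
  // summed across all peers/sources.
  public void setWALReaderEditsBufferBytes(long usage) {
    walReaderEditsBufferBytes.set(usage);
  }

  public long getWALReaderEditsBufferBytes() {
    return walReaderEditsBufferBytes.get();
  }
}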

[hbase] branch master updated: HBASE-24779 Report on the WAL edit buffer usage/limit for replication

2020-08-07 Thread elserj
This is an automated email from the ASF dual-hosted git repository.

elserj pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/master by this push:
 new 124af63  HBASE-24779 Report on the WAL edit buffer usage/limit for 
replication
124af63 is described below

commit 124af6392cdebff2fe2693c572a9564dc318eee5
Author: Josh Elser 
AuthorDate: Fri Aug 7 12:59:17 2020 -0400

HBASE-24779 Report on the WAL edit buffer usage/limit for replication

Closes #2193

Signed-off-by: Bharath Vissapragada 
Signed-off-by: Sean Busbey 
Signed-off-by: Wellington Chevreuil 
---
 .../MetricsReplicationGlobalSourceSource.java  | 248 ++---
 ... MetricsReplicationGlobalSourceSourceImpl.java} |  19 +-
 .../MetricsReplicationSourceFactory.java   |   2 +-
 .../MetricsReplicationSourceFactoryImpl.java   |   4 +-
 .../replication/regionserver/MetricsSource.java|  19 +-
 .../replication/regionserver/Replication.java  |   6 +-
 .../regionserver/ReplicationSource.java|   4 +-
 .../regionserver/ReplicationSourceManager.java |  25 ++-
 .../regionserver/ReplicationSourceWALReader.java   |  11 +-
 .../hbase/replication/TestReplicationEndpoint.java |  45 +++-
 .../regionserver/TestWALEntryStream.java   |   5 +
 11 files changed, 140 insertions(+), 248 deletions(-)

diff --git 
a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationGlobalSourceSource.java
 
b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationGlobalSourceSource.java
index 630fdb8..e373a6c 100644
--- 
a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationGlobalSourceSource.java
+++ 
b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationGlobalSourceSource.java
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -15,239 +15,25 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-
 package org.apache.hadoop.hbase.replication.regionserver;
 
-import org.apache.hadoop.metrics2.lib.MutableFastCounter;
-import org.apache.hadoop.metrics2.lib.MutableGaugeLong;
-import org.apache.hadoop.metrics2.lib.MutableHistogram;
 import org.apache.yetus.audience.InterfaceAudience;
 
 @InterfaceAudience.Private
-public class MetricsReplicationGlobalSourceSource implements 
MetricsReplicationSourceSource{
-  private static final String KEY_PREFIX = "source.";
-
-  private final MetricsReplicationSourceImpl rms;
-
-  private final MutableHistogram ageOfLastShippedOpHist;
-  private final MutableGaugeLong sizeOfLogQueueGauge;
-  private final MutableFastCounter logReadInEditsCounter;
-  private final MutableFastCounter walEditsFilteredCounter;
-  private final MutableFastCounter shippedBatchesCounter;
-  private final MutableFastCounter shippedOpsCounter;
-  private final MutableFastCounter shippedBytesCounter;
-  private final MutableFastCounter logReadInBytesCounter;
-  private final MutableFastCounter shippedHFilesCounter;
-  private final MutableGaugeLong sizeOfHFileRefsQueueGauge;
-  private final MutableFastCounter unknownFileLengthForClosedWAL;
-  private final MutableFastCounter uncleanlyClosedWAL;
-  private final MutableFastCounter uncleanlyClosedSkippedBytes;
-  private final MutableFastCounter restartWALReading;
-  private final MutableFastCounter repeatedFileBytes;
-  private final MutableFastCounter completedWAL;
-  private final MutableFastCounter completedRecoveryQueue;
-  private final MutableFastCounter failedRecoveryQueue;
-
-  public MetricsReplicationGlobalSourceSource(MetricsReplicationSourceImpl 
rms) {
-this.rms = rms;
-
-ageOfLastShippedOpHist = rms.getMetricsRegistry().getHistogram(SOURCE_AGE_OF_LAST_SHIPPED_OP);
-
-sizeOfLogQueueGauge = rms.getMetricsRegistry().getGauge(SOURCE_SIZE_OF_LOG_QUEUE, 0L);
-
-shippedBatchesCounter = rms.getMetricsRegistry().getCounter(SOURCE_SHIPPED_BATCHES, 0L);
-
-shippedOpsCounter = rms.getMetricsRegistry().getCounter(SOURCE_SHIPPED_OPS, 0L);
-
-shippedBytesCounter = rms.getMetricsRegistry().getCounter(SOURCE_SHIPPED_BYTES, 0L);
-
-logReadInBytesCounter = rms.getMetricsRegistry().getCounter(SOURCE_LOG_READ_IN_BYTES, 0L);
-
-logReadInEditsCounter = rms.getMetricsRegistry().getCounter(SOURCE_LOG_READ_IN_EDITS, 0L);
-
-walEditsFilteredCounter = rms.getMetricsRegistry().getCounter(SOURCE_LOG_EDITS_FILTERED, 0L);
-
-shippedHFilesCounter = rms.getMetricsRegistry().getCounter(SOURCE_SHIPPED_HFILES, 0L);
-
-sizeOfHFileRefsQueueGauge =
-rms.getMetricsRegistry(

[hbase] branch branch-2.3 updated: HBASE-22146 Removing a namespace-level space quota does not remove policies against contained tables

2020-07-21 Thread elserj
This is an automated email from the ASF dual-hosted git repository.

elserj pushed a commit to branch branch-2.3
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2.3 by this push:
 new 4b10892  HBASE-22146 Removing a namespace-level space quota does not remove policies against contained tables
4b10892 is described below

commit 4b10892445820688e360491747c464670ac1b5cb
Author: surbhi 
AuthorDate: Fri Jun 19 16:21:22 2020 -0700

HBASE-22146 Removing a namespace-level space quota does not remove policies against contained tables

Closes #1935

Signed-off-by: Josh Elser 
---
 .../apache/hadoop/hbase/quotas/QuotaTableUtil.java | 28 ++
 .../org/apache/hadoop/hbase/quotas/QuotaUtil.java  |  8 +++
 .../hbase/quotas/SpaceQuotaHelperForTests.java | 64 +-
 .../hadoop/hbase/quotas/TestSpaceQuotaRemoval.java | 58 
 4 files changed, 157 insertions(+), 1 deletion(-)

diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaTableUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaTableUtil.java
index 4f14911..624b475 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaTableUtil.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaTableUtil.java
@@ -629,6 +629,34 @@ public class QuotaTableUtil {
   }
 
   /**
+   * Remove table usage snapshots (u:p columns) for the namespace passed
+   * @param connection connection to re-use
+   * @param namespace the namespace to fetch the list of table usage snapshots
+   */
+  static void deleteTableUsageSnapshotsForNamespace(Connection connection, String namespace)
+throws IOException {
+Scan s = new Scan();
+//Get rows for all tables in namespace
+s.setRowPrefixFilter(Bytes.add(QUOTA_TABLE_ROW_KEY_PREFIX, Bytes.toBytes(namespace + TableName.NAMESPACE_DELIM)));
+//Scan for table usage column (u:p) in quota table
+s.addColumn(QUOTA_FAMILY_USAGE,QUOTA_QUALIFIER_POLICY);
+//Scan for table quota column (q:s) if table has a space quota defined
+s.addColumn(QUOTA_FAMILY_INFO,QUOTA_QUALIFIER_SETTINGS);
+try (Table quotaTable = connection.getTable(QUOTA_TABLE_NAME);
+ ResultScanner rs = quotaTable.getScanner(s)) {
+  for (Result r : rs) {
+byte[] data = r.getValue(QUOTA_FAMILY_INFO, QUOTA_QUALIFIER_SETTINGS);
+//if table does not have a table space quota defined, delete table usage column (u:p)
+if (data == null) {
+  Delete delete = new Delete(r.getRow());
+  delete.addColumns(QUOTA_FAMILY_USAGE,QUOTA_QUALIFIER_POLICY);
+  quotaTable.delete(delete);
+}
+  }
+}
+  }
+
+  /**
* Fetches the computed size of all snapshots against tables in a namespace for space quotas.
*/
   static long getNamespaceSnapshotSize(
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaUtil.java
index 9053405..1fc81e5 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaUtil.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaUtil.java
@@ -266,6 +266,14 @@ public class QuotaUtil extends QuotaTableUtil {
 if (qualifier != null) {
   delete.addColumns(QUOTA_FAMILY_INFO, qualifier);
 }
+if (isNamespaceRowKey(rowKey)) {
+  String ns = getNamespaceFromRowKey(rowKey);
+  Quotas namespaceQuota = getNamespaceQuota(connection,ns);
+  if (namespaceQuota != null && namespaceQuota.hasSpace()) {
+// When deleting namespace space quota, also delete table usage(u:p) snapshots
+deleteTableUsageSnapshotsForNamespace(connection, ns);
+  }
+}
 doDelete(connection, delete);
   }
 
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/SpaceQuotaHelperForTests.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/SpaceQuotaHelperForTests.java
index 93367b8..b066215 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/SpaceQuotaHelperForTests.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/SpaceQuotaHelperForTests.java
@@ -161,6 +161,28 @@ public class SpaceQuotaHelperForTests {
 return tn;
   }
 
+
+  TableName writeUntilViolationAndVerifyViolationInNamespace(
+  String ns, SpaceViolationPolicy policyToViolate, Mutation m) throws Exception {
+final TableName tn = writeUntilViolationInNamespace(ns, policyToViolate);
+verifyViolation(policyToViolate, tn, m);
+return tn;
+  }
+
+  TableName writeUntilViolationInNamespace(String ns, SpaceViolationPolicy policyToViolate) throws Exception {
+TableName tn = createTableWithRegions(ns,10);
+
+setQuotaLimit(ns, policyToViolate, 4L);
+
+// Write more data than should be allowed and flush it to disk
+writeDat
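
For context, the deletion above runs when a client removes a namespace-level space quota. A minimal sketch of that client-side call, assuming a standard Connection and an illustrative namespace name; none of these identifiers come from the patch itself:

  import java.io.IOException;
  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.client.Admin;
  import org.apache.hadoop.hbase.client.Connection;
  import org.apache.hadoop.hbase.client.ConnectionFactory;
  import org.apache.hadoop.hbase.quotas.QuotaSettings;
  import org.apache.hadoop.hbase.quotas.QuotaSettingsFactory;

  // Removes the namespace-level space limit; with this fix, the matching
  // per-table usage snapshot (u:p) columns are deleted from the quota table too.
  static void removeNamespaceSpaceQuota(Configuration conf, String ns) throws IOException {
    try (Connection conn = ConnectionFactory.createConnection(conf);
        Admin admin = conn.getAdmin()) {
      QuotaSettings removal = QuotaSettingsFactory.removeNamespaceSpaceLimit(ns);
      admin.setQuota(removal);
    }
  }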

[hbase] branch branch-2 updated: HBASE-22146 Removing a namespace-level space quota does not remove policies against contained tables

2020-07-21 Thread elserj
This is an automated email from the ASF dual-hosted git repository.

elserj pushed a commit to branch branch-2
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2 by this push:
 new 33102a1  HBASE-22146 Removing a namespace-level space quota does not remove policies against contained tables
33102a1 is described below

commit 33102a1265f9200b99553a44a9d70d1e420b9303
Author: surbhi 
AuthorDate: Fri Jun 19 16:21:22 2020 -0700

HBASE-22146 Removing a namespace-level space quota does not remove policies against contained tables

Closes #1935

Signed-off-by: Josh Elser 
---
 .../apache/hadoop/hbase/quotas/QuotaTableUtil.java | 28 ++
 .../org/apache/hadoop/hbase/quotas/QuotaUtil.java  |  8 +++
 .../hbase/quotas/SpaceQuotaHelperForTests.java | 64 +-
 .../hadoop/hbase/quotas/TestSpaceQuotaRemoval.java | 58 
 4 files changed, 157 insertions(+), 1 deletion(-)

diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaTableUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaTableUtil.java
index 4f14911..624b475 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaTableUtil.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaTableUtil.java
@@ -629,6 +629,34 @@ public class QuotaTableUtil {
   }
 
   /**
+   * Remove table usage snapshots (u:p columns) for the namespace passed
+   * @param connection connection to re-use
+   * @param namespace the namespace to fetch the list of table usage snapshots
+   */
+  static void deleteTableUsageSnapshotsForNamespace(Connection connection, String namespace)
+throws IOException {
+Scan s = new Scan();
+//Get rows for all tables in namespace
+s.setRowPrefixFilter(Bytes.add(QUOTA_TABLE_ROW_KEY_PREFIX, Bytes.toBytes(namespace + TableName.NAMESPACE_DELIM)));
+//Scan for table usage column (u:p) in quota table
+s.addColumn(QUOTA_FAMILY_USAGE,QUOTA_QUALIFIER_POLICY);
+//Scan for table quota column (q:s) if table has a space quota defined
+s.addColumn(QUOTA_FAMILY_INFO,QUOTA_QUALIFIER_SETTINGS);
+try (Table quotaTable = connection.getTable(QUOTA_TABLE_NAME);
+ ResultScanner rs = quotaTable.getScanner(s)) {
+  for (Result r : rs) {
+byte[] data = r.getValue(QUOTA_FAMILY_INFO, QUOTA_QUALIFIER_SETTINGS);
+//if table does not have a table space quota defined, delete table usage column (u:p)
+if (data == null) {
+  Delete delete = new Delete(r.getRow());
+  delete.addColumns(QUOTA_FAMILY_USAGE,QUOTA_QUALIFIER_POLICY);
+  quotaTable.delete(delete);
+}
+  }
+}
+  }
+
+  /**
* Fetches the computed size of all snapshots against tables in a namespace for space quotas.
*/
   static long getNamespaceSnapshotSize(
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaUtil.java
index 9053405..1fc81e5 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaUtil.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaUtil.java
@@ -266,6 +266,14 @@ public class QuotaUtil extends QuotaTableUtil {
 if (qualifier != null) {
   delete.addColumns(QUOTA_FAMILY_INFO, qualifier);
 }
+if (isNamespaceRowKey(rowKey)) {
+  String ns = getNamespaceFromRowKey(rowKey);
+  Quotas namespaceQuota = getNamespaceQuota(connection,ns);
+  if (namespaceQuota != null && namespaceQuota.hasSpace()) {
+// When deleting namespace space quota, also delete table usage(u:p) snapshots
+deleteTableUsageSnapshotsForNamespace(connection, ns);
+  }
+}
 doDelete(connection, delete);
   }
 
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/SpaceQuotaHelperForTests.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/SpaceQuotaHelperForTests.java
index 93367b8..b066215 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/SpaceQuotaHelperForTests.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/SpaceQuotaHelperForTests.java
@@ -161,6 +161,28 @@ public class SpaceQuotaHelperForTests {
 return tn;
   }
 
+
+  TableName writeUntilViolationAndVerifyViolationInNamespace(
+  String ns, SpaceViolationPolicy policyToViolate, Mutation m) throws Exception {
+final TableName tn = writeUntilViolationInNamespace(ns, policyToViolate);
+verifyViolation(policyToViolate, tn, m);
+return tn;
+  }
+
+  TableName writeUntilViolationInNamespace(String ns, SpaceViolationPolicy policyToViolate) throws Exception {
+TableName tn = createTableWithRegions(ns,10);
+
+setQuotaLimit(ns, policyToViolate, 4L);
+
+// Write more data than should be allowed and flush it to disk
+writeDat

[hbase] branch master updated: HBASE-22146 Removing a namespace-level space quota does not remove policies against contained tables

2020-07-21 Thread elserj
This is an automated email from the ASF dual-hosted git repository.

elserj pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/master by this push:
 new 8191fbd  HBASE-22146 Removing a namespace-level space quota does not remove policies against contained tables
8191fbd is described below

commit 8191fbdd7d4030dab05532e285206c8189cd37c7
Author: surbhi 
AuthorDate: Fri Jun 19 16:21:22 2020 -0700

HBASE-22146 Removing a namespace-level space quota does not remove policies against contained tables

Closes #1935

Signed-off-by: Josh Elser 
---
 .../apache/hadoop/hbase/quotas/QuotaTableUtil.java | 28 ++
 .../org/apache/hadoop/hbase/quotas/QuotaUtil.java  |  8 +++
 .../hbase/quotas/SpaceQuotaHelperForTests.java | 64 +-
 .../hadoop/hbase/quotas/TestSpaceQuotaRemoval.java | 58 
 4 files changed, 157 insertions(+), 1 deletion(-)

diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaTableUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaTableUtil.java
index 4f14911..624b475 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaTableUtil.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaTableUtil.java
@@ -629,6 +629,34 @@ public class QuotaTableUtil {
   }
 
   /**
+   * Remove table usage snapshots (u:p columns) for the namespace passed
+   * @param connection connection to re-use
+   * @param namespace the namespace to fetch the list of table usage snapshots
+   */
+  static void deleteTableUsageSnapshotsForNamespace(Connection connection, String namespace)
+throws IOException {
+Scan s = new Scan();
+//Get rows for all tables in namespace
+s.setRowPrefixFilter(Bytes.add(QUOTA_TABLE_ROW_KEY_PREFIX, Bytes.toBytes(namespace + TableName.NAMESPACE_DELIM)));
+//Scan for table usage column (u:p) in quota table
+s.addColumn(QUOTA_FAMILY_USAGE,QUOTA_QUALIFIER_POLICY);
+//Scan for table quota column (q:s) if table has a space quota defined
+s.addColumn(QUOTA_FAMILY_INFO,QUOTA_QUALIFIER_SETTINGS);
+try (Table quotaTable = connection.getTable(QUOTA_TABLE_NAME);
+ ResultScanner rs = quotaTable.getScanner(s)) {
+  for (Result r : rs) {
+byte[] data = r.getValue(QUOTA_FAMILY_INFO, QUOTA_QUALIFIER_SETTINGS);
+//if table does not have a table space quota defined, delete table usage column (u:p)
+if (data == null) {
+  Delete delete = new Delete(r.getRow());
+  delete.addColumns(QUOTA_FAMILY_USAGE,QUOTA_QUALIFIER_POLICY);
+  quotaTable.delete(delete);
+}
+  }
+}
+  }
+
+  /**
* Fetches the computed size of all snapshots against tables in a namespace for space quotas.
*/
   static long getNamespaceSnapshotSize(
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaUtil.java
index e6acc4b..1d816d6 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaUtil.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaUtil.java
@@ -266,6 +266,14 @@ public class QuotaUtil extends QuotaTableUtil {
 if (qualifier != null) {
   delete.addColumns(QUOTA_FAMILY_INFO, qualifier);
 }
+if (isNamespaceRowKey(rowKey)) {
+  String ns = getNamespaceFromRowKey(rowKey);
+  Quotas namespaceQuota = getNamespaceQuota(connection,ns);
+  if (namespaceQuota != null && namespaceQuota.hasSpace()) {
+// When deleting namespace space quota, also delete table usage(u:p) snapshots
+deleteTableUsageSnapshotsForNamespace(connection, ns);
+  }
+}
 doDelete(connection, delete);
   }
 
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/SpaceQuotaHelperForTests.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/SpaceQuotaHelperForTests.java
index d522c0c..296d38f 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/SpaceQuotaHelperForTests.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/SpaceQuotaHelperForTests.java
@@ -158,6 +158,28 @@ public class SpaceQuotaHelperForTests {
 return tn;
   }
 
+
+  TableName writeUntilViolationAndVerifyViolationInNamespace(
+  String ns, SpaceViolationPolicy policyToViolate, Mutation m) throws Exception {
+final TableName tn = writeUntilViolationInNamespace(ns, policyToViolate);
+verifyViolation(policyToViolate, tn, m);
+return tn;
+  }
+
+  TableName writeUntilViolationInNamespace(String ns, SpaceViolationPolicy policyToViolate) throws Exception {
+TableName tn = createTableWithRegions(ns,10);
+
+setQuotaLimit(ns, policyToViolate, 4L);
+
+// Write more data than should be allowed and flush it to disk
+writeDat

[hbase] branch branch-2.2 updated: HBASE-22146 Removing a namespace-level space quota does not remove policies against contained tables

2020-07-21 Thread elserj
This is an automated email from the ASF dual-hosted git repository.

elserj pushed a commit to branch branch-2.2
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2.2 by this push:
 new 2c08876  HBASE-22146 Removing a namespace-level space quota does not remove policies against contained tables
2c08876 is described below

commit 2c08876d706fa2cfe762ca0267e6c71f811065cc
Author: surbhi 
AuthorDate: Fri Jun 19 16:21:22 2020 -0700

HBASE-22146 Removing a namespace-level space quota does not remove policies against contained tables

Closes #1935

Signed-off-by: Josh Elser 
---
 .../apache/hadoop/hbase/quotas/QuotaTableUtil.java | 28 ++
 .../org/apache/hadoop/hbase/quotas/QuotaUtil.java  |  8 +++
 .../hbase/quotas/SpaceQuotaHelperForTests.java | 64 +-
 .../hadoop/hbase/quotas/TestSpaceQuotaRemoval.java | 58 
 4 files changed, 157 insertions(+), 1 deletion(-)

diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaTableUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaTableUtil.java
index 8ee60ea..3f452df 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaTableUtil.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaTableUtil.java
@@ -628,6 +628,34 @@ public class QuotaTableUtil {
   }
 
   /**
+   * Remove table usage snapshots (u:p columns) for the namespace passed
+   * @param connection connection to re-use
+   * @param namespace the namespace to fetch the list of table usage snapshots
+   */
+  static void deleteTableUsageSnapshotsForNamespace(Connection connection, String namespace)
+throws IOException {
+Scan s = new Scan();
+//Get rows for all tables in namespace
+s.setRowPrefixFilter(Bytes.add(QUOTA_TABLE_ROW_KEY_PREFIX, Bytes.toBytes(namespace + TableName.NAMESPACE_DELIM)));
+//Scan for table usage column (u:p) in quota table
+s.addColumn(QUOTA_FAMILY_USAGE,QUOTA_QUALIFIER_POLICY);
+//Scan for table quota column (q:s) if table has a space quota defined
+s.addColumn(QUOTA_FAMILY_INFO,QUOTA_QUALIFIER_SETTINGS);
+try (Table quotaTable = connection.getTable(QUOTA_TABLE_NAME);
+ ResultScanner rs = quotaTable.getScanner(s)) {
+  for (Result r : rs) {
+byte[] data = r.getValue(QUOTA_FAMILY_INFO, QUOTA_QUALIFIER_SETTINGS);
+//if table does not have a table space quota defined, delete table usage column (u:p)
+if (data == null) {
+  Delete delete = new Delete(r.getRow());
+  delete.addColumns(QUOTA_FAMILY_USAGE,QUOTA_QUALIFIER_POLICY);
+  quotaTable.delete(delete);
+}
+  }
+}
+  }
+
+  /**
* Fetches the computed size of all snapshots against tables in a namespace for space quotas.
*/
   static long getNamespaceSnapshotSize(
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaUtil.java
index 9053405..1fc81e5 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaUtil.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaUtil.java
@@ -266,6 +266,14 @@ public class QuotaUtil extends QuotaTableUtil {
 if (qualifier != null) {
   delete.addColumns(QUOTA_FAMILY_INFO, qualifier);
 }
+if (isNamespaceRowKey(rowKey)) {
+  String ns = getNamespaceFromRowKey(rowKey);
+  Quotas namespaceQuota = getNamespaceQuota(connection,ns);
+  if (namespaceQuota != null && namespaceQuota.hasSpace()) {
+// When deleting namespace space quota, also delete table usage(u:p) snapshots
+deleteTableUsageSnapshotsForNamespace(connection, ns);
+  }
+}
 doDelete(connection, delete);
   }
 
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/SpaceQuotaHelperForTests.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/SpaceQuotaHelperForTests.java
index 93367b8..b066215 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/SpaceQuotaHelperForTests.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/SpaceQuotaHelperForTests.java
@@ -161,6 +161,28 @@ public class SpaceQuotaHelperForTests {
 return tn;
   }
 
+
+  TableName writeUntilViolationAndVerifyViolationInNamespace(
+  String ns, SpaceViolationPolicy policyToViolate, Mutation m) throws Exception {
+final TableName tn = writeUntilViolationInNamespace(ns, policyToViolate);
+verifyViolation(policyToViolate, tn, m);
+return tn;
+  }
+
+  TableName writeUntilViolationInNamespace(String ns, SpaceViolationPolicy policyToViolate) throws Exception {
+TableName tn = createTableWithRegions(ns,10);
+
+setQuotaLimit(ns, policyToViolate, 4L);
+
+// Write more data than should be allowed and flush it to disk
+writeDat

[hbase] branch branch-1 updated: HBASE-19365 Guard against a missing table descriptor which crashes master

2020-06-17 Thread elserj
This is an automated email from the ASF dual-hosted git repository.

elserj pushed a commit to branch branch-1
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-1 by this push:
 new 404c2dd  HBASE-19365 Guard against a missing table descriptor which crashes master
404c2dd is described below

commit 404c2dd3fedb30b15418818630a272defc2ee5c7
Author: Josh Elser 
AuthorDate: Mon Jun 15 21:15:38 2020 -0400

HBASE-19365 Guard against a missing table descriptor which crashes master

While we never expect table descriptors to be missing, a corrupt meta
can result in the master crashing before regions get assigned. We can
guard against that happening with a simple null-check.

Signed-off-by: Viraj Jasani 

Closes #1908
---
 .../org/apache/hadoop/hbase/master/AssignmentManager.java  | 14 +-
 1 file changed, 9 insertions(+), 5 deletions(-)

diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java
index 78294a0..de4edbb 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java
@@ -3220,12 +3220,16 @@ public class AssignmentManager extends ZooKeeperListener {
   // maybe because it crashed.
  PairOfSameType<HRegionInfo> p = MetaTableAccessor.getMergeRegions(result);
   if (p.getFirst() != null && p.getSecond() != null) {
-int numReplicas = server.getTableDescriptors().get(p.getFirst().
-getTable()).getRegionReplication();
-for (HRegionInfo merge : p) {
-  for (int i = 1; i < numReplicas; i++) {
-replicasToClose.add(RegionReplicaUtil.getRegionInfoForReplica(merge, i));
+HTableDescriptor desc = server.getTableDescriptors().get(p.getFirst().getTable());
+if (desc != null) {
+  int numReplicas = desc.getRegionReplication();
+  for (HRegionInfo merge : p) {
+for (int i = 1; i < numReplicas; i++) {
+replicasToClose.add(RegionReplicaUtil.getRegionInfoForReplica(merge, i));
+}
   }
+} else {
+  LOG.warn("Found no table descriptor on filesystem for " + 
p.getFirst().getTable());
 }
   }
   RegionLocations rl =  MetaTableAccessor.getRegionLocations(result);



[hbase] branch branch-2.2 updated: HBASE-23195 FSDataInputStreamWrapper unbuffer can NOT invoke the classes that NOT implements CanUnbuffer but its parents class implements CanUnbuffer

2020-06-12 Thread elserj
This is an automated email from the ASF dual-hosted git repository.

elserj pushed a commit to branch branch-2.2
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2.2 by this push:
 new 0b73497  HBASE-23195 FSDataInputStreamWrapper unbuffer can NOT invoke the classes that NOT implements CanUnbuffer but its parents class implements CanUnbuffer
0b73497 is described below

commit 0b734974473998f7e16a39119609c629dbd21398
Author: zhaoym6 
AuthorDate: Tue Oct 22 21:57:24 2019 +0800

HBASE-23195 FSDataInputStreamWrapper unbuffer can NOT invoke the classes that NOT implements CanUnbuffer but its parents class implements CanUnbuffer

Closes #746

Signed-off-by: Duo Zhang 
Signed-off-by: Josh Elser 
---
 .../hadoop/hbase/io/FSDataInputStreamWrapper.java  |  41 ++
 .../hbase/io/TestFSDataInputStreamWrapper.java | 139 +
 2 files changed, 150 insertions(+), 30 deletions(-)

diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/FSDataInputStreamWrapper.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/FSDataInputStreamWrapper.java
index 989d0aa..a33114e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/FSDataInputStreamWrapper.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/FSDataInputStreamWrapper.java
@@ -20,11 +20,10 @@ package org.apache.hadoop.hbase.io;
 import java.io.Closeable;
 import java.io.IOException;
 import java.io.InputStream;
-import java.lang.reflect.InvocationTargetException;
-import java.lang.reflect.Method;
 import java.util.concurrent.atomic.AtomicInteger;
 
 import org.apache.commons.io.IOUtils;
+import org.apache.hadoop.fs.CanUnbuffer;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -91,11 +90,6 @@ public class FSDataInputStreamWrapper implements Closeable {
   // reads without hbase checksum verification.
   private AtomicInteger hbaseChecksumOffCount = new AtomicInteger(-1);
 
-  private Boolean instanceOfCanUnbuffer = null;
-  // Using reflection to get org.apache.hadoop.fs.CanUnbuffer#unbuffer method to avoid compilation
-  // errors against Hadoop pre 2.6.4 and 2.7.1 versions.
-  private Method unbuffer = null;
-
   private final static ReadStatistics readStatistics = new ReadStatistics();
 
   private static class ReadStatistics {
@@ -105,6 +99,9 @@ public class FSDataInputStreamWrapper implements Closeable {
 long totalZeroCopyBytesRead;
   }
 
+  private Boolean instanceOfCanUnbuffer = null;
+  private CanUnbuffer unbuffer = null;
+
  public FSDataInputStreamWrapper(FileSystem fs, Path path) throws IOException {
 this(fs, path, false, -1L);
   }
@@ -331,39 +328,23 @@ public class FSDataInputStreamWrapper implements Closeable {
   if (this.instanceOfCanUnbuffer == null) {
// To ensure we compute whether the stream is instance of CanUnbuffer only once.
 this.instanceOfCanUnbuffer = false;
-Class[] streamInterfaces = streamClass.getInterfaces();
-for (Class c : streamInterfaces) {
-  if (c.getCanonicalName().toString().equals("org.apache.hadoop.fs.CanUnbuffer")) {
-try {
-  this.unbuffer = streamClass.getDeclaredMethod("unbuffer");
-} catch (NoSuchMethodException | SecurityException e) {
-  if (isLogTraceEnabled) {
-LOG.trace("Failed to find 'unbuffer' method in class " + 
streamClass
-+ " . So there may be a TCP socket connection "
-+ "left open in CLOSE_WAIT state.", e);
-  }
-  return;
-}
-this.instanceOfCanUnbuffer = true;
-break;
-  }
+if (wrappedStream instanceof CanUnbuffer) {
+  this.unbuffer = (CanUnbuffer) wrappedStream;
+  this.instanceOfCanUnbuffer = true;
 }
   }
   if (this.instanceOfCanUnbuffer) {
 try {
-  this.unbuffer.invoke(wrappedStream);
-} catch (IllegalAccessException | IllegalArgumentException | InvocationTargetException e) {
+  this.unbuffer.unbuffer();
+} catch (UnsupportedOperationException e){
   if (isLogTraceEnabled) {
 LOG.trace("Failed to invoke 'unbuffer' method in class " + 
streamClass
-+ " . So there may be a TCP socket connection left open in 
CLOSE_WAIT state.", e);
++ " . So there may be the stream does not support 
unbuffering.", e);
   }
 }
   } else {
 if (isLogTraceEnabled) {
-  LOG.trace("Failed to find 'unbuffer' method in class " + streamClass
-  + " . So there may be a TCP socket connection "
-  + "left open in CLOSE_WAIT state. For more details check &qu

[hbase] branch branch-2.3 updated: HBASE-23195 FSDataInputStreamWrapper unbuffer can NOT invoke the classes that NOT implements CanUnbuffer but its parents class implements CanUnbuffer

2020-06-12 Thread elserj
This is an automated email from the ASF dual-hosted git repository.

elserj pushed a commit to branch branch-2.3
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2.3 by this push:
 new ea99bd8  HBASE-23195 FSDataInputStreamWrapper unbuffer can NOT invoke the classes that NOT implements CanUnbuffer but its parents class implements CanUnbuffer
ea99bd8 is described below

commit ea99bd8a89d68b2fc558b8862de7bf6d810d0808
Author: zhaoym6 
AuthorDate: Tue Oct 22 21:57:24 2019 +0800

HBASE-23195 FSDataInputStreamWrapper unbuffer can NOT invoke the classes that NOT implements CanUnbuffer but its parents class implements CanUnbuffer

Closes #746

Signed-off-by: Duo Zhang 
Signed-off-by: Josh Elser 
---
 .../hadoop/hbase/io/FSDataInputStreamWrapper.java  |  41 ++
 .../hbase/io/TestFSDataInputStreamWrapper.java | 139 +
 2 files changed, 150 insertions(+), 30 deletions(-)

diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/FSDataInputStreamWrapper.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/FSDataInputStreamWrapper.java
index 67bca84..c1f9a7d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/FSDataInputStreamWrapper.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/FSDataInputStreamWrapper.java
@@ -20,11 +20,10 @@ package org.apache.hadoop.hbase.io;
 import java.io.Closeable;
 import java.io.IOException;
 import java.io.InputStream;
-import java.lang.reflect.InvocationTargetException;
-import java.lang.reflect.Method;
 import java.util.concurrent.atomic.AtomicInteger;
 
 import org.apache.commons.io.IOUtils;
+import org.apache.hadoop.fs.CanUnbuffer;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -91,11 +90,6 @@ public class FSDataInputStreamWrapper implements Closeable {
   // reads without hbase checksum verification.
   private AtomicInteger hbaseChecksumOffCount = new AtomicInteger(-1);
 
-  private Boolean instanceOfCanUnbuffer = null;
-  // Using reflection to get org.apache.hadoop.fs.CanUnbuffer#unbuffer method to avoid compilation
-  // errors against Hadoop pre 2.6.4 and 2.7.1 versions.
-  private Method unbuffer = null;
-
   private final static ReadStatistics readStatistics = new ReadStatistics();
 
   private static class ReadStatistics {
@@ -105,6 +99,9 @@ public class FSDataInputStreamWrapper implements Closeable {
 long totalZeroCopyBytesRead;
   }
 
+  private Boolean instanceOfCanUnbuffer = null;
+  private CanUnbuffer unbuffer = null;
+
  public FSDataInputStreamWrapper(FileSystem fs, Path path) throws IOException {
 this(fs, path, false, -1L);
   }
@@ -331,39 +328,23 @@ public class FSDataInputStreamWrapper implements Closeable {
   if (this.instanceOfCanUnbuffer == null) {
// To ensure we compute whether the stream is instance of CanUnbuffer only once.
 this.instanceOfCanUnbuffer = false;
-Class[] streamInterfaces = streamClass.getInterfaces();
-for (Class c : streamInterfaces) {
-  if (c.getCanonicalName().toString().equals("org.apache.hadoop.fs.CanUnbuffer")) {
-try {
-  this.unbuffer = streamClass.getDeclaredMethod("unbuffer");
-} catch (NoSuchMethodException | SecurityException e) {
-  if (isLogTraceEnabled) {
-LOG.trace("Failed to find 'unbuffer' method in class " + 
streamClass
-+ " . So there may be a TCP socket connection "
-+ "left open in CLOSE_WAIT state.", e);
-  }
-  return;
-}
-this.instanceOfCanUnbuffer = true;
-break;
-  }
+if (wrappedStream instanceof CanUnbuffer) {
+  this.unbuffer = (CanUnbuffer) wrappedStream;
+  this.instanceOfCanUnbuffer = true;
 }
   }
   if (this.instanceOfCanUnbuffer) {
 try {
-  this.unbuffer.invoke(wrappedStream);
-} catch (IllegalAccessException | IllegalArgumentException | InvocationTargetException e) {
+  this.unbuffer.unbuffer();
+} catch (UnsupportedOperationException e){
   if (isLogTraceEnabled) {
 LOG.trace("Failed to invoke 'unbuffer' method in class " + 
streamClass
-+ " . So there may be a TCP socket connection left open in 
CLOSE_WAIT state.", e);
++ " . So there may be the stream does not support 
unbuffering.", e);
   }
 }
   } else {
 if (isLogTraceEnabled) {
-  LOG.trace("Failed to find 'unbuffer' method in class " + streamClass
-  + " . So there may be a TCP socket connection "
-  + "left open in CLOSE_WAIT state. For more details check &qu

[hbase] branch branch-2 updated: HBASE-23195 FSDataInputStreamWrapper unbuffer can NOT invoke the classes that NOT implements CanUnbuffer but its parents class implements CanUnbuffer

2020-06-12 Thread elserj
This is an automated email from the ASF dual-hosted git repository.

elserj pushed a commit to branch branch-2
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2 by this push:
 new 2d57595  HBASE-23195 FSDataInputStreamWrapper unbuffer can NOT invoke the classes that NOT implements CanUnbuffer but its parents class implements CanUnbuffer
2d57595 is described below

commit 2d57595f5481e56548afe8641a33dbce6663c79e
Author: zhaoym6 
AuthorDate: Tue Oct 22 21:57:24 2019 +0800

HBASE-23195 FSDataInputStreamWrapper unbuffer can NOT invoke the classes that NOT implements CanUnbuffer but its parents class implements CanUnbuffer

Closes #746

Signed-off-by: Duo Zhang 
Signed-off-by: Josh Elser 
---
 .../hadoop/hbase/io/FSDataInputStreamWrapper.java  |  41 ++
 .../hbase/io/TestFSDataInputStreamWrapper.java | 139 +
 2 files changed, 150 insertions(+), 30 deletions(-)

diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/FSDataInputStreamWrapper.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/FSDataInputStreamWrapper.java
index 67bca84..c1f9a7d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/FSDataInputStreamWrapper.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/FSDataInputStreamWrapper.java
@@ -20,11 +20,10 @@ package org.apache.hadoop.hbase.io;
 import java.io.Closeable;
 import java.io.IOException;
 import java.io.InputStream;
-import java.lang.reflect.InvocationTargetException;
-import java.lang.reflect.Method;
 import java.util.concurrent.atomic.AtomicInteger;
 
 import org.apache.commons.io.IOUtils;
+import org.apache.hadoop.fs.CanUnbuffer;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -91,11 +90,6 @@ public class FSDataInputStreamWrapper implements Closeable {
   // reads without hbase checksum verification.
   private AtomicInteger hbaseChecksumOffCount = new AtomicInteger(-1);
 
-  private Boolean instanceOfCanUnbuffer = null;
-  // Using reflection to get org.apache.hadoop.fs.CanUnbuffer#unbuffer method to avoid compilation
-  // errors against Hadoop pre 2.6.4 and 2.7.1 versions.
-  private Method unbuffer = null;
-
   private final static ReadStatistics readStatistics = new ReadStatistics();
 
   private static class ReadStatistics {
@@ -105,6 +99,9 @@ public class FSDataInputStreamWrapper implements Closeable {
 long totalZeroCopyBytesRead;
   }
 
+  private Boolean instanceOfCanUnbuffer = null;
+  private CanUnbuffer unbuffer = null;
+
  public FSDataInputStreamWrapper(FileSystem fs, Path path) throws IOException {
 this(fs, path, false, -1L);
   }
@@ -331,39 +328,23 @@ public class FSDataInputStreamWrapper implements Closeable {
   if (this.instanceOfCanUnbuffer == null) {
// To ensure we compute whether the stream is instance of CanUnbuffer only once.
 this.instanceOfCanUnbuffer = false;
-Class[] streamInterfaces = streamClass.getInterfaces();
-for (Class c : streamInterfaces) {
-  if (c.getCanonicalName().toString().equals("org.apache.hadoop.fs.CanUnbuffer")) {
-try {
-  this.unbuffer = streamClass.getDeclaredMethod("unbuffer");
-} catch (NoSuchMethodException | SecurityException e) {
-  if (isLogTraceEnabled) {
-LOG.trace("Failed to find 'unbuffer' method in class " + 
streamClass
-+ " . So there may be a TCP socket connection "
-+ "left open in CLOSE_WAIT state.", e);
-  }
-  return;
-}
-this.instanceOfCanUnbuffer = true;
-break;
-  }
+if (wrappedStream instanceof CanUnbuffer) {
+  this.unbuffer = (CanUnbuffer) wrappedStream;
+  this.instanceOfCanUnbuffer = true;
 }
   }
   if (this.instanceOfCanUnbuffer) {
 try {
-  this.unbuffer.invoke(wrappedStream);
-} catch (IllegalAccessException | IllegalArgumentException | InvocationTargetException e) {
+  this.unbuffer.unbuffer();
+} catch (UnsupportedOperationException e){
   if (isLogTraceEnabled) {
 LOG.trace("Failed to invoke 'unbuffer' method in class " + 
streamClass
-+ " . So there may be a TCP socket connection left open in 
CLOSE_WAIT state.", e);
++ " . So there may be the stream does not support 
unbuffering.", e);
   }
 }
   } else {
 if (isLogTraceEnabled) {
-  LOG.trace("Failed to find 'unbuffer' method in class " + streamClass
-  + " . So there may be a TCP socket connection "
-  + "left open in CLOSE_WAIT state. For more details check "
- 

[hbase] branch master updated: HBASE-23195 FSDataInputStreamWrapper unbuffer can NOT invoke the classes that NOT implements CanUnbuffer but its parents class implements CanUnbuffer

2020-06-12 Thread elserj
This is an automated email from the ASF dual-hosted git repository.

elserj pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/master by this push:
 new 21fe873  HBASE-23195 FSDataInputStreamWrapper unbuffer can NOT invoke the classes that NOT implements CanUnbuffer but its parents class implements CanUnbuffer
21fe873 is described below

commit 21fe873ebae80d5bf08d0c9f7ccaa42fef83c8f6
Author: zhaoym6 
AuthorDate: Tue Oct 22 21:57:24 2019 +0800

HBASE-23195 FSDataInputStreamWrapper unbuffer can NOT invoke the classes that NOT implements CanUnbuffer but its parents class implements CanUnbuffer

Closes #746

Signed-off-by: Duo Zhang 
Signed-off-by: Josh Elser 
---
 .../hadoop/hbase/io/FSDataInputStreamWrapper.java  |  41 ++
 .../hbase/io/TestFSDataInputStreamWrapper.java | 139 +
 2 files changed, 150 insertions(+), 30 deletions(-)

diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/FSDataInputStreamWrapper.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/FSDataInputStreamWrapper.java
index 67bca84..c1f9a7d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/FSDataInputStreamWrapper.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/FSDataInputStreamWrapper.java
@@ -20,11 +20,10 @@ package org.apache.hadoop.hbase.io;
 import java.io.Closeable;
 import java.io.IOException;
 import java.io.InputStream;
-import java.lang.reflect.InvocationTargetException;
-import java.lang.reflect.Method;
 import java.util.concurrent.atomic.AtomicInteger;
 
 import org.apache.commons.io.IOUtils;
+import org.apache.hadoop.fs.CanUnbuffer;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -91,11 +90,6 @@ public class FSDataInputStreamWrapper implements Closeable {
   // reads without hbase checksum verification.
   private AtomicInteger hbaseChecksumOffCount = new AtomicInteger(-1);
 
-  private Boolean instanceOfCanUnbuffer = null;
-  // Using reflection to get org.apache.hadoop.fs.CanUnbuffer#unbuffer method to avoid compilation
-  // errors against Hadoop pre 2.6.4 and 2.7.1 versions.
-  private Method unbuffer = null;
-
   private final static ReadStatistics readStatistics = new ReadStatistics();
 
   private static class ReadStatistics {
@@ -105,6 +99,9 @@ public class FSDataInputStreamWrapper implements Closeable {
 long totalZeroCopyBytesRead;
   }
 
+  private Boolean instanceOfCanUnbuffer = null;
+  private CanUnbuffer unbuffer = null;
+
  public FSDataInputStreamWrapper(FileSystem fs, Path path) throws IOException {
 this(fs, path, false, -1L);
   }
@@ -331,39 +328,23 @@ public class FSDataInputStreamWrapper implements Closeable {
   if (this.instanceOfCanUnbuffer == null) {
// To ensure we compute whether the stream is instance of CanUnbuffer only once.
 this.instanceOfCanUnbuffer = false;
-Class[] streamInterfaces = streamClass.getInterfaces();
-for (Class c : streamInterfaces) {
-  if (c.getCanonicalName().toString().equals("org.apache.hadoop.fs.CanUnbuffer")) {
-try {
-  this.unbuffer = streamClass.getDeclaredMethod("unbuffer");
-} catch (NoSuchMethodException | SecurityException e) {
-  if (isLogTraceEnabled) {
-LOG.trace("Failed to find 'unbuffer' method in class " + 
streamClass
-+ " . So there may be a TCP socket connection "
-+ "left open in CLOSE_WAIT state.", e);
-  }
-  return;
-}
-this.instanceOfCanUnbuffer = true;
-break;
-  }
+if (wrappedStream instanceof CanUnbuffer) {
+  this.unbuffer = (CanUnbuffer) wrappedStream;
+  this.instanceOfCanUnbuffer = true;
 }
   }
   if (this.instanceOfCanUnbuffer) {
 try {
-  this.unbuffer.invoke(wrappedStream);
-} catch (IllegalAccessException | IllegalArgumentException | InvocationTargetException e) {
+  this.unbuffer.unbuffer();
+} catch (UnsupportedOperationException e){
   if (isLogTraceEnabled) {
 LOG.trace("Failed to invoke 'unbuffer' method in class " + 
streamClass
-+ " . So there may be a TCP socket connection left open in 
CLOSE_WAIT state.", e);
++ " . So there may be the stream does not support 
unbuffering.", e);
   }
 }
   } else {
 if (isLogTraceEnabled) {
-  LOG.trace("Failed to find 'unbuffer' method in class " + streamClass
-  + " . So there may be a TCP socket connection "
-  + "left open in CLOSE_WAIT state. For more details check "
- 

[hbase] branch master updated: HBASE-8458 Support for batch version of checkAndMutate()

2020-06-10 Thread elserj
This is an automated email from the ASF dual-hosted git repository.

elserj pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/master by this push:
 new efd5a96  HBASE-8458 Support for batch version of checkAndMutate()
efd5a96 is described below

commit efd5a962e5a6aa07dcba4b55f8b165ea3dbbd6ef
Author: Toshihiro Suzuki 
AuthorDate: Mon May 4 16:53:41 2020 +0900

HBASE-8458 Support for batch version of checkAndMutate()

Closes #1648

Signed-off-by: Josh Elser 
---
 .../hbase/client/AsyncBatchRpcRetryingCaller.java  |  16 +-
 .../org/apache/hadoop/hbase/client/AsyncTable.java |  47 ++
 .../apache/hadoop/hbase/client/AsyncTableImpl.java |  11 +
 .../apache/hadoop/hbase/client/CheckAndMutate.java | 362 ++
 .../org/apache/hadoop/hbase/client/Mutation.java   |  20 +-
 .../hadoop/hbase/client/RawAsyncTableImpl.java |  62 +-
 .../java/org/apache/hadoop/hbase/client/Table.java |  41 ++
 .../hadoop/hbase/client/TableOverAsyncTable.java   |  10 +
 .../hbase/shaded/protobuf/RequestConverter.java| 215 +++---
 .../hbase/shaded/protobuf/ResponseConverter.java   |  21 +-
 .../src/main/protobuf/client/Client.proto  |  11 +-
 .../hadoop/hbase/rest/client/RemoteHTable.java |  11 +
 .../hadoop/hbase/regionserver/RSRpcServices.java   | 262 ++--
 .../hadoop/hbase/client/DummyAsyncTable.java   |  10 +
 .../apache/hadoop/hbase/client/TestAsyncTable.java | 732 -
 .../hadoop/hbase/client/TestAsyncTableBatch.java   |  54 ++
 .../hadoop/hbase/client/TestCheckAndMutate.java| 574 +++-
 .../hadoop/hbase/client/TestFromClientSide3.java   |  54 ++
 .../hbase/client/TestMalformedCellFromClient.java  |   5 +-
 .../hadoop/hbase/thrift2/client/ThriftTable.java   |  11 +
 20 files changed, 2270 insertions(+), 259 deletions(-)

diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncBatchRpcRetryingCaller.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncBatchRpcRetryingCaller.java
index 464eff5..7e05b05 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncBatchRpcRetryingCaller.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncBatchRpcRetryingCaller.java
@@ -256,7 +256,7 @@ class AsyncBatchRpcRetryingCaller {
   }
 
  private ClientProtos.MultiRequest buildReq(Map actionsByRegion,
-  List cells, Map rowMutationsIndexMap) throws IOException {
+  List cells, Map indexMap) throws IOException {
 ClientProtos.MultiRequest.Builder multiRequestBuilder = ClientProtos.MultiRequest.newBuilder();
 ClientProtos.RegionAction.Builder regionActionBuilder = ClientProtos.RegionAction.newBuilder();
 ClientProtos.Action.Builder actionBuilder = ClientProtos.Action.newBuilder();
@@ -264,14 +264,14 @@ class AsyncBatchRpcRetryingCaller {
 for (Map.Entry entry : actionsByRegion.entrySet()) {
   long nonceGroup = conn.getNonceGenerator().getNonceGroup();
   // multiRequestBuilder will be populated with region actions.
-  // rowMutationsIndexMap will be non-empty after the call if there is RowMutations in the
+  // indexMap will be non-empty after the call if there is RowMutations/CheckAndMutate in the
   // action list.
   RequestConverter.buildNoDataRegionActions(entry.getKey(),
 entry.getValue().actions.stream()
.sorted((a1, a2) -> Integer.compare(a1.getOriginalIndex(), a2.getOriginalIndex()))
   .collect(Collectors.toList()),
-cells, multiRequestBuilder, regionActionBuilder, actionBuilder, mutationBuilder, nonceGroup,
-rowMutationsIndexMap);
+cells, multiRequestBuilder, regionActionBuilder, actionBuilder, mutationBuilder,
+nonceGroup, indexMap);
 }
 return multiRequestBuilder.build();
   }
@@ -367,10 +367,10 @@ class AsyncBatchRpcRetryingCaller {
 List cells = new ArrayList<>();
// Map from a created RegionAction to the original index for a RowMutations within
// the original list of actions. This will be used to process the results when there
-// is RowMutations in the action list.
-Map rowMutationsIndexMap = new HashMap<>();
+// is RowMutations/CheckAndMutate in the action list.
+Map indexMap = new HashMap<>();
 try {
-  req = buildReq(serverReq.actionsByRegion, cells, rowMutationsIndexMap);
+  req = buildReq(serverReq.actionsByRegion, cells, indexMap);
 } catch (IOException e) {
   onError(serverReq.actionsByRegion, tries, e, serverName);
   return;
@@ -387,7 +387,7 @@ class AsyncBatchRpcRetryingCaller {
   } else {
 try {
  onComplete(serverReq.actionsByRegion, tries, serverName, ResponseConverter.getResults(req,
-rowMutationsIndexMap, resp, controller.cellScanner()));
+indexMap, resp, controller.cellScanner()));
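
For readers tracking the API surface: CheckAndMutate implements Row, so the new conditional mutations can ride in an ordinary batch() call. A short, hedged sketch of client usage, assuming an open Table and illustrative row/column names (none of these identifiers come from the patch itself):

  import java.io.IOException;
  import java.util.Arrays;
  import org.apache.hadoop.hbase.client.CheckAndMutate;
  import org.apache.hadoop.hbase.client.Put;
  import org.apache.hadoop.hbase.client.Table;
  import org.apache.hadoop.hbase.util.Bytes;

  static void conditionalBatch(Table table) throws IOException, InterruptedException {
    byte[] f = Bytes.toBytes("cf");
    byte[] q = Bytes.toBytes("q");
    // Two conditional updates shipped to the servers in a single multi call.
    CheckAndMutate cam1 = CheckAndMutate.newBuilder(Bytes.toBytes("row1"))
        .ifEquals(f, q, Bytes.toBytes("expected"))
        .build(new Put(Bytes.toBytes("row1")).addColumn(f, q, Bytes.toBytes("v1")));
    CheckAndMutate cam2 = CheckAndMutate.newBuilder(Bytes.toBytes("row2"))
        .ifNotExists(f, q)
        .build(new Put(Bytes.toBytes("row2")).addColumn(f, q, Bytes.toBytes("v2")));
    Object[] results = new Object[2];
    table.batch(Arrays.asList(cam1, cam2), results);
    // Each element corresponding to a CheckAndMutate action is a CheckAndMutateResult.
  }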

[hbase] branch branch-2.3 updated: HBASE-24280 Skip explicit hadoop3 profile activation on master

2020-05-29 Thread elserj
This is an automated email from the ASF dual-hosted git repository.

elserj pushed a commit to branch branch-2.3
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2.3 by this push:
 new dba1281  HBASE-24280 Skip explicit hadoop3 profile activation on master
dba1281 is described below

commit dba128166de95c9c61981bf4f9fa90358372bc05
Author: Josh Elser 
AuthorDate: Fri May 1 18:18:06 2020 -0400

HBASE-24280 Skip explicit hadoop3 profile activation on master

On 2.x branches, we need to explicitly activate profiles for H3. On
master, all H2 support is dropped which means no special profiles are
required for H3 (though, there is still a profile there to encapsulate
H3 logic).

We need to make sure that the yetus invocation can correctly pass down
any profile information into the personality, so we activate the exact
profiles we want.

Closes #1609

Co-authored-by: Istvan Toth 
Signed-off-by: stack 
---
 dev-support/hbase-personality.sh  | 20 +++-
 dev-support/hbase_nightly_yetus.sh|  6 +-
 dev-support/jenkins_precommit_github_yetus.sh |  6 +-
 3 files changed, 25 insertions(+), 7 deletions(-)

diff --git a/dev-support/hbase-personality.sh b/dev-support/hbase-personality.sh
index 735e133..8ed1a83 100755
--- a/dev-support/hbase-personality.sh
+++ b/dev-support/hbase-personality.sh
@@ -145,8 +145,11 @@ function personality_modules
 extra="${extra} -Dhttps.protocols=TLSv1.2"
   fi
 
-  if [[ -n "${HADOOP_PROFILE}" ]]; then
-extra="${extra} -Phadoop-${HADOOP_PROFILE}"
+  # If we have HADOOP_PROFILE specified and we're on branch-2.x, pass along
+  # the hadoop.profile system property. Ensures that Hadoop2 and Hadoop3
+  # logic is not both activated within Maven.
+  if [[ -n "${HADOOP_PROFILE}" ]] && [[ "${PATCH_BRANCH}" =~ branch-2* ]] ; 
then
+extra="${extra} -Dhadoop.profile=${HADOOP_PROFILE}"
   fi
 
  # BUILDMODE value is 'full' when there is no patch to be tested, and we are running checks on
@@ -458,8 +461,11 @@ function shadedjars_rebuild
 '-pl' 'hbase-shaded/hbase-shaded-check-invariants' '-am'
 '-Dtest=NoUnitTests' '-DHBasePatchProcess' '-Prelease'
'-Dmaven.javadoc.skip=true' '-Dcheckstyle.skip=true' '-Dspotbugs.skip=true')
-  if [[ -n "${HADOOP_PROFILE}" ]]; then
-maven_args+=("-Phadoop-${HADOOP_PROFILE}")
+  # If we have HADOOP_PROFILE specified and we're on branch-2.x, pass along
+  # the hadoop.profile system property. Ensures that Hadoop2 and Hadoop3
+  # logic is not both activated within Maven.
+  if [[ -n "${HADOOP_PROFILE}" ]] && [[ "${PATCH_BRANCH}" =~ branch-2* ]] ; 
then
+maven_args+=("-Dhadoop.profile=${HADOOP_PROFILE}")
   fi
 
   # disabled because "maven_executor" needs to return both command and args
@@ -636,6 +642,10 @@ function hadoopcheck_rebuild
 fi
   done
 
+  hadoop_profile=""
+  if [[ "${PATCH_BRANCH}" =~ branch-2* ]]; then
+hadoop_profile="-Dhadoop.profile=3.0"
+  fi
   for hadoopver in ${hbase_hadoop3_versions}; do
 logfile="${PATCH_DIR}/patch-javac-${hadoopver}.txt"
 # disabled because "maven_executor" needs to return both command and args
@@ -644,7 +654,7 @@ function hadoopcheck_rebuild
   $(maven_executor) clean install \
 -DskipTests -DHBasePatchProcess \
 -Dhadoop-three.version="${hadoopver}" \
--Phadoop-3.0
+${hadoop_profile}
 count=$(${GREP} -c '\[ERROR\]' "${logfile}")
 if [[ ${count} -gt 0 ]]; then
  add_vote_table -1 hadoopcheck "${BUILDMODEMSG} causes ${count} errors with Hadoop v${hadoopver}."
diff --git a/dev-support/hbase_nightly_yetus.sh b/dev-support/hbase_nightly_yetus.sh
index ef91f20..65b5270 100755
--- a/dev-support/hbase_nightly_yetus.sh
+++ b/dev-support/hbase_nightly_yetus.sh
@@ -76,7 +76,11 @@ fi
 
 # For testing with specific hadoop version. Activates corresponding profile in maven runs.
 if [[ -n "${HADOOP_PROFILE}" ]]; then
-  YETUS_ARGS=("--hadoop-profile=${HADOOP_PROFILE}" "${YETUS_ARGS[@]}")
+  # Master has only Hadoop3 support. We don't need to activate any profile.
+  # The Jenkinsfile should not attempt to run any Hadoop2 tests.
+  if [[ "${BRANCH_NAME}" =~ branch-2* ]]; then
+YETUS_ARGS=("--hadoop-profile=${HADOOP_PROFILE}" "${YETUS_ARGS[@]}")
+  fi
 fi
 
 if [[ -n "${SKIP_ERROR_PRONE}" ]]; then
diff --git a/dev-support/jenkins_precommit_github_yetus.sh b/dev-support/jenkins_precommit_github_yetus.sh
index bc7221b..1c489d6 100755
--- a/dev-support/jenkins_precommit_github_yetus.sh
+++ b/dev-support/jenkins_precommit_github_yetus.sh
@@ -127,7 +127,11 @@ YETUS_ARGS+=("--skip-

[hbase] branch branch-2 updated: HBASE-24280 Skip explicit hadoop3 profile activation on master

2020-05-29 Thread elserj
This is an automated email from the ASF dual-hosted git repository.

elserj pushed a commit to branch branch-2
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2 by this push:
 new 99a40dd  HBASE-24280 Skip explicit hadoop3 profile activation on master
99a40dd is described below

commit 99a40ddacf19f7ae9d89cdc4778384ffd6a0e394
Author: Josh Elser 
AuthorDate: Fri May 1 18:18:06 2020 -0400

HBASE-24280 Skip explicit hadoop3 profile activation on master

On 2.x branches, we need to explicitly activate profiles for H3. On
master, all H2 support is dropped which means no special profiles are
required for H3 (though, there is still a profile there to encapsulate
H3 logic).

We need to make sure that the yetus invocation can correctly pass down
any profile information into the personality, so we activate the exact
profiles we want.

Closes #1609

Co-authored-by: Istvan Toth 
Signed-off-by: stack 
---
 dev-support/hbase-personality.sh  | 20 +++-
 dev-support/hbase_nightly_yetus.sh|  6 +-
 dev-support/jenkins_precommit_github_yetus.sh |  6 +-
 3 files changed, 25 insertions(+), 7 deletions(-)

diff --git a/dev-support/hbase-personality.sh b/dev-support/hbase-personality.sh
index 735e133..8ed1a83 100755
--- a/dev-support/hbase-personality.sh
+++ b/dev-support/hbase-personality.sh
@@ -145,8 +145,11 @@ function personality_modules
 extra="${extra} -Dhttps.protocols=TLSv1.2"
   fi
 
-  if [[ -n "${HADOOP_PROFILE}" ]]; then
-extra="${extra} -Phadoop-${HADOOP_PROFILE}"
+  # If we have HADOOP_PROFILE specified and we're on branch-2.x, pass along
+  # the hadoop.profile system property. Ensures that Hadoop2 and Hadoop3
+  # logic is not both activated within Maven.
+  if [[ -n "${HADOOP_PROFILE}" ]] && [[ "${PATCH_BRANCH}" =~ branch-2* ]] ; 
then
+extra="${extra} -Dhadoop.profile=${HADOOP_PROFILE}"
   fi
 
  # BUILDMODE value is 'full' when there is no patch to be tested, and we are running checks on
@@ -458,8 +461,11 @@ function shadedjars_rebuild
 '-pl' 'hbase-shaded/hbase-shaded-check-invariants' '-am'
 '-Dtest=NoUnitTests' '-DHBasePatchProcess' '-Prelease'
'-Dmaven.javadoc.skip=true' '-Dcheckstyle.skip=true' '-Dspotbugs.skip=true')
-  if [[ -n "${HADOOP_PROFILE}" ]]; then
-maven_args+=("-Phadoop-${HADOOP_PROFILE}")
+  # If we have HADOOP_PROFILE specified and we're on branch-2.x, pass along
+  # the hadoop.profile system property. Ensures that Hadoop2 and Hadoop3
+  # logic is not both activated within Maven.
+  if [[ -n "${HADOOP_PROFILE}" ]] && [[ "${PATCH_BRANCH}" =~ branch-2* ]] ; 
then
+maven_args+=("-Dhadoop.profile=${HADOOP_PROFILE}")
   fi
 
   # disabled because "maven_executor" needs to return both command and args
@@ -636,6 +642,10 @@ function hadoopcheck_rebuild
 fi
   done
 
+  hadoop_profile=""
+  if [[ "${PATCH_BRANCH}" =~ branch-2* ]]; then
+hadoop_profile="-Dhadoop.profile=3.0"
+  fi
   for hadoopver in ${hbase_hadoop3_versions}; do
 logfile="${PATCH_DIR}/patch-javac-${hadoopver}.txt"
 # disabled because "maven_executor" needs to return both command and args
@@ -644,7 +654,7 @@ function hadoopcheck_rebuild
   $(maven_executor) clean install \
 -DskipTests -DHBasePatchProcess \
 -Dhadoop-three.version="${hadoopver}" \
--Phadoop-3.0
+${hadoop_profile}
 count=$(${GREP} -c '\[ERROR\]' "${logfile}")
 if [[ ${count} -gt 0 ]]; then
  add_vote_table -1 hadoopcheck "${BUILDMODEMSG} causes ${count} errors with Hadoop v${hadoopver}."
diff --git a/dev-support/hbase_nightly_yetus.sh b/dev-support/hbase_nightly_yetus.sh
index ef91f20..65b5270 100755
--- a/dev-support/hbase_nightly_yetus.sh
+++ b/dev-support/hbase_nightly_yetus.sh
@@ -76,7 +76,11 @@ fi
 
 # For testing with specific hadoop version. Activates corresponding profile in maven runs.
 if [[ -n "${HADOOP_PROFILE}" ]]; then
-  YETUS_ARGS=("--hadoop-profile=${HADOOP_PROFILE}" "${YETUS_ARGS[@]}")
+  # Master has only Hadoop3 support. We don't need to activate any profile.
+  # The Jenkinsfile should not attempt to run any Hadoop2 tests.
+  if [[ "${BRANCH_NAME}" =~ branch-2* ]]; then
+YETUS_ARGS=("--hadoop-profile=${HADOOP_PROFILE}" "${YETUS_ARGS[@]}")
+  fi
 fi
 
 if [[ -n "${SKIP_ERROR_PRONE}" ]]; then
diff --git a/dev-support/jenkins_precommit_github_yetus.sh b/dev-support/jenkins_precommit_github_yetus.sh
index bc7221b..1c489d6 100755
--- a/dev-support/jenkins_precommit_github_yetus.sh
+++ b/dev-support/jenkins_precommit_github_yetus.sh
@@ -127,7 +127,11 @@ YETUS_ARGS+=("--skip-

[hbase] branch master updated: HBASE-24280 Skip explicit hadoop3 profile activation on master

2020-05-01 Thread elserj
This is an automated email from the ASF dual-hosted git repository.

elserj pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/master by this push:
 new e54eec7  HBASE-24280 Skip explicit hadoop3 profile activation on master
e54eec7 is described below

commit e54eec71c8ce270ea6a4ba96d0bf8dff0e0cc7d3
Author: Josh Elser 
AuthorDate: Fri May 1 18:18:06 2020 -0400

HBASE-24280 Skip explicit hadoop3 profile activation on master

On 2.x branches, we need to explicitly activate profiles for H3. On
master, all H2 support is dropped which means no special profiles are
required for H3 (though, there is still a profile there to encapsulate
H3 logic).

We need to make sure that the yetus invocation can correctly pass down
any profile information into the personality, so we activate the exact
profiles we want.

Closes #1609

Co-authored-by: Istvan Toth 
Signed-off-by: stack 
---
 dev-support/hbase-personality.sh  | 20 +++-
 dev-support/hbase_nightly_yetus.sh|  6 +-
 dev-support/jenkins_precommit_github_yetus.sh |  6 +-
 3 files changed, 25 insertions(+), 7 deletions(-)

diff --git a/dev-support/hbase-personality.sh b/dev-support/hbase-personality.sh
index 735e133..8ed1a83 100755
--- a/dev-support/hbase-personality.sh
+++ b/dev-support/hbase-personality.sh
@@ -145,8 +145,11 @@ function personality_modules
 extra="${extra} -Dhttps.protocols=TLSv1.2"
   fi
 
-  if [[ -n "${HADOOP_PROFILE}" ]]; then
-extra="${extra} -Phadoop-${HADOOP_PROFILE}"
+  # If we have HADOOP_PROFILE specified and we're on branch-2.x, pass along
+  # the hadoop.profile system property. Ensures that Hadoop2 and Hadoop3
+  # logic is not both activated within Maven.
+  if [[ -n "${HADOOP_PROFILE}" ]] && [[ "${PATCH_BRANCH}" =~ branch-2* ]] ; 
then
+extra="${extra} -Dhadoop.profile=${HADOOP_PROFILE}"
   fi
 
  # BUILDMODE value is 'full' when there is no patch to be tested, and we are running checks on
@@ -458,8 +461,11 @@ function shadedjars_rebuild
 '-pl' 'hbase-shaded/hbase-shaded-check-invariants' '-am'
 '-Dtest=NoUnitTests' '-DHBasePatchProcess' '-Prelease'
     '-Dmaven.javadoc.skip=true' '-Dcheckstyle.skip=true' '-Dspotbugs.skip=true')
-  if [[ -n "${HADOOP_PROFILE}" ]]; then
-    maven_args+=("-Phadoop-${HADOOP_PROFILE}")
+  # If we have HADOOP_PROFILE specified and we're on branch-2.x, pass along
+  # the hadoop.profile system property. Ensures that Hadoop2 and Hadoop3
+  # logic is not both activated within Maven.
+  if [[ -n "${HADOOP_PROFILE}" ]] && [[ "${PATCH_BRANCH}" =~ branch-2* ]] ; then
+    maven_args+=("-Dhadoop.profile=${HADOOP_PROFILE}")
   fi
 
   # disabled because "maven_executor" needs to return both command and args
@@ -636,6 +642,10 @@ function hadoopcheck_rebuild
 fi
   done
 
+  hadoop_profile=""
+  if [[ "${PATCH_BRANCH}" =~ branch-2* ]]; then
+    hadoop_profile="-Dhadoop.profile=3.0"
+  fi
   for hadoopver in ${hbase_hadoop3_versions}; do
 logfile="${PATCH_DIR}/patch-javac-${hadoopver}.txt"
 # disabled because "maven_executor" needs to return both command and args
@@ -644,7 +654,7 @@ function hadoopcheck_rebuild
   $(maven_executor) clean install \
 -DskipTests -DHBasePatchProcess \
 -Dhadoop-three.version="${hadoopver}" \
--Phadoop-3.0
+${hadoop_profile}
 count=$(${GREP} -c '\[ERROR\]' "${logfile}")
 if [[ ${count} -gt 0 ]]; then
      add_vote_table -1 hadoopcheck "${BUILDMODEMSG} causes ${count} errors with Hadoop v${hadoopver}."
diff --git a/dev-support/hbase_nightly_yetus.sh b/dev-support/hbase_nightly_yetus.sh
index ef91f20..65b5270 100755
--- a/dev-support/hbase_nightly_yetus.sh
+++ b/dev-support/hbase_nightly_yetus.sh
@@ -76,7 +76,11 @@ fi
 
 # For testing with specific hadoop version. Activates corresponding profile in maven runs.
 if [[ -n "${HADOOP_PROFILE}" ]]; then
-  YETUS_ARGS=("--hadoop-profile=${HADOOP_PROFILE}" "${YETUS_ARGS[@]}")
+  # Master has only Hadoop3 support. We don't need to activate any profile.
+  # The Jenkinsfile should not attempt to run any Hadoop2 tests.
+  if [[ "${BRANCH_NAME}" =~ branch-2* ]]; then
+    YETUS_ARGS=("--hadoop-profile=${HADOOP_PROFILE}" "${YETUS_ARGS[@]}")
+  fi
 fi
 
 if [[ -n "${SKIP_ERROR_PRONE}" ]]; then
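
As a quick sanity check of the "[[ ... =~ branch-2* ]]" test these scripts
rely on (hypothetical branch names; =~ is an unanchored regex match, so both
branch-2 and branch-2.x names pass):

    for b in master branch-2 branch-2.3; do
      if [[ "${b}" =~ branch-2* ]]; then
        echo "${b}: --hadoop-profile passed through"
      else
        echo "${b}: profile flag skipped"
      fi
    done
    # master: profile flag skipped
    # branch-2: --hadoop-profile passed through
    # branch-2.3: --hadoop-profile passed through
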
diff --git a/dev-support/jenkins_precommit_github_yetus.sh b/dev-support/jenkins_precommit_github_yetus.sh
index bc7221b..1c489d6 100755
--- a/dev-support/jenkins_precommit_github_yetus.sh
+++ b/dev-support/jenkins_precommit_github_yetus.sh
@@ -127,7 +127,11 @@ YETUS_ARGS+=("--skip-

[hbase] branch branch-2.3 updated: HBASE-24252 Implement proxyuser/doAs mechanism for hbase-http

2020-04-27 Thread elserj
This is an automated email from the ASF dual-hosted git repository.

elserj pushed a commit to branch branch-2.3
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2.3 by this push:
 new c78bbbc  HBASE-24252 Implement proxyuser/doAs mechanism for hbase-http
c78bbbc is described below

commit c78bbbcdcb3d9864456e2c2d29300c10714d69de
Author: Josh Elser 
AuthorDate: Mon Apr 27 13:05:53 2020 -0400

HBASE-24252 Implement proxyuser/doAs mechanism for hbase-http

copy ProxyUserAuthenticationFilter from Hadoop
add hbase.security.authentication.spnego.kerberos.proxyuser.enable parameter (default false)
wire ProxyUserAuthenticationFilter into HttpServer

Signed-off-by: Sean Busbey 
Signed-off-by: Josh Elser 
---
 .../org/apache/hadoop/hbase/http/HttpServer.java   |  19 +-
 .../hbase/http/ProxyUserAuthenticationFilter.java  | 219 +++
 .../hbase/http/HttpServerFunctionalTest.java   |  11 +
 .../hbase/http/TestProxyUserSpnegoHttpServer.java  | 295 +
 4 files changed, 543 insertions(+), 1 deletion(-)

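A sketch of how the new knob might be used, not taken from the commit: the
enable key comes from the commit message, the hadoop.proxyuser.* layout is
Hadoop's standard proxyuser convention, and the principal, host, and user
names below are made up. The doAs query parameter follows the behavior of
Hadoop's ProxyUserAuthenticationFilter, which this class copies.

    # hbase-site.xml (example values):
    #   hbase.security.authentication.spnego.kerberos.proxyuser.enable = true
    #   hadoop.proxyuser.knox.hosts  = gateway.example.com
    #   hadoop.proxyuser.knox.groups = *

    # A SPNEGO-authenticated superuser may then act on behalf of another user:
    kinit -kt /etc/security/keytabs/knox.service.keytab knox/gateway.example.com@EXAMPLE.COM
    curl --negotiate -u : "http://master.example.com:16010/jmx?doAs=alice"
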
diff --git a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/HttpServer.java b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/HttpServer.java
index 887380e..d3176ca 100644
--- a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/HttpServer.java
+++ b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/HttpServer.java
@@ -61,6 +61,7 @@ import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.authentication.server.AuthenticationFilter;
 import org.apache.hadoop.security.authorize.AccessControlList;
+import org.apache.hadoop.security.authorize.ProxyUsers;
 import org.apache.hadoop.util.Shell;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.yetus.audience.InterfaceStability;
@@ -127,6 +128,10 @@ public class HttpServer implements FilterContainer {
   static final String HTTP_SPNEGO_AUTHENTICATION_KRB_NAME_SUFFIX = "kerberos.name.rules";
   public static final String HTTP_SPNEGO_AUTHENTICATION_KRB_NAME_KEY =
       HTTP_SPNEGO_AUTHENTICATION_PREFIX + HTTP_SPNEGO_AUTHENTICATION_KRB_NAME_SUFFIX;
+  static final String HTTP_SPNEGO_AUTHENTICATION_PROXYUSER_ENABLE_SUFFIX = "kerberos.proxyuser.enable";
+  public static final String HTTP_SPNEGO_AUTHENTICATION_PROXYUSER_ENABLE_KEY =
+      HTTP_SPNEGO_AUTHENTICATION_PREFIX + HTTP_SPNEGO_AUTHENTICATION_PROXYUSER_ENABLE_SUFFIX;
+  public static final boolean HTTP_SPNEGO_AUTHENTICATION_PROXYUSER_ENABLE_DEFAULT = false;
   static final String HTTP_AUTHENTICATION_SIGNATURE_SECRET_FILE_SUFFIX =
   "signature.secret.file";
   public static final String HTTP_AUTHENTICATION_SIGNATURE_SECRET_FILE_KEY =
@@ -145,6 +150,7 @@ public class HttpServer implements FilterContainer {
   public static final String ADMINS_ACL = "admins.acl";
   public static final String BIND_ADDRESS = "bind.address";
   public static final String SPNEGO_FILTER = "SpnegoFilter";
+  public static final String SPNEGO_PROXYUSER_FILTER = "SpnegoProxyUserFilter";
   public static final String NO_CACHE_FILTER = "NoCacheFilter";
   public static final String APP_DIR = "webapps";
 
@@ -1026,7 +1032,18 @@ public class HttpServer implements FilterContainer {
   + "to enable SPNEGO/Kerberos authentication for the Web UI");
 }
 
-    addGlobalFilter(SPNEGO_FILTER, AuthenticationFilter.class.getName(), params);
+    if (conf.getBoolean(HTTP_SPNEGO_AUTHENTICATION_PROXYUSER_ENABLE_KEY,
+        HTTP_SPNEGO_AUTHENTICATION_PROXYUSER_ENABLE_DEFAULT)) {
+      // Copy/rename standard hadoop proxyuser settings to filter
+      for (Map.Entry<String, String> proxyEntry :
+          conf.getPropsWithPrefix(ProxyUsers.CONF_HADOOP_PROXYUSER).entrySet()) {
+        params.put(ProxyUserAuthenticationFilter.PROXYUSER_PREFIX + proxyEntry.getKey(),
+            proxyEntry.getValue());
+      }
+      addGlobalFilter(SPNEGO_PROXYUSER_FILTER, ProxyUserAuthenticationFilter.class.getName(),
+          params);
+    } else {
+      addGlobalFilter(SPNEGO_FILTER, AuthenticationFilter.class.getName(), params);
+    }
   }
 
   /**
diff --git a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/ProxyUserAuthenticationFilter.java b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/ProxyUserAuthenticationFilter.java
new file mode 100644
index 000..5fb17c9
--- /dev/null
+++ b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/ProxyUserAuthenticationFilter.java
@@ -0,0 +1,219 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, 

[hbase] branch branch-2 updated: HBASE-24252 Implement proxyuser/doAs mechanism for hbase-http

2020-04-27 Thread elserj
This is an automated email from the ASF dual-hosted git repository.

elserj pushed a commit to branch branch-2
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2 by this push:
 new 8eb2f67  HBASE-24252 Implement proxyuser/doAs mechanism for hbase-http
8eb2f67 is described below

commit 8eb2f6752c720b0bad0cc29f981d9a60a00a5cb3
Author: Josh Elser 
AuthorDate: Mon Apr 27 13:05:53 2020 -0400

HBASE-24252 Implement proxyuser/doAs mechanism for hbase-http

copy ProxyUserAuthenticationFilter from Hadoop
add hbase.security.authentication.spnego.kerberos.proxyuser.enable parameter (default false)
wire ProxyUserAuthenticationFilter into HttpServer

Signed-off-by: Sean Busbey 
Signed-off-by: Josh Elser 
---
 .../org/apache/hadoop/hbase/http/HttpServer.java   |  19 +-
 .../hbase/http/ProxyUserAuthenticationFilter.java  | 219 +++
 .../hbase/http/HttpServerFunctionalTest.java   |  11 +
 .../hbase/http/TestProxyUserSpnegoHttpServer.java  | 295 +
 4 files changed, 543 insertions(+), 1 deletion(-)

diff --git a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/HttpServer.java b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/HttpServer.java
index 887380e..d3176ca 100644
--- a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/HttpServer.java
+++ b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/HttpServer.java
@@ -61,6 +61,7 @@ import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.authentication.server.AuthenticationFilter;
 import org.apache.hadoop.security.authorize.AccessControlList;
+import org.apache.hadoop.security.authorize.ProxyUsers;
 import org.apache.hadoop.util.Shell;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.yetus.audience.InterfaceStability;
@@ -127,6 +128,10 @@ public class HttpServer implements FilterContainer {
   static final String HTTP_SPNEGO_AUTHENTICATION_KRB_NAME_SUFFIX = "kerberos.name.rules";
   public static final String HTTP_SPNEGO_AUTHENTICATION_KRB_NAME_KEY =
       HTTP_SPNEGO_AUTHENTICATION_PREFIX + HTTP_SPNEGO_AUTHENTICATION_KRB_NAME_SUFFIX;
+  static final String HTTP_SPNEGO_AUTHENTICATION_PROXYUSER_ENABLE_SUFFIX = "kerberos.proxyuser.enable";
+  public static final String HTTP_SPNEGO_AUTHENTICATION_PROXYUSER_ENABLE_KEY =
+      HTTP_SPNEGO_AUTHENTICATION_PREFIX + HTTP_SPNEGO_AUTHENTICATION_PROXYUSER_ENABLE_SUFFIX;
+  public static final boolean HTTP_SPNEGO_AUTHENTICATION_PROXYUSER_ENABLE_DEFAULT = false;
   static final String HTTP_AUTHENTICATION_SIGNATURE_SECRET_FILE_SUFFIX =
   "signature.secret.file";
   public static final String HTTP_AUTHENTICATION_SIGNATURE_SECRET_FILE_KEY =
@@ -145,6 +150,7 @@ public class HttpServer implements FilterContainer {
   public static final String ADMINS_ACL = "admins.acl";
   public static final String BIND_ADDRESS = "bind.address";
   public static final String SPNEGO_FILTER = "SpnegoFilter";
+  public static final String SPNEGO_PROXYUSER_FILTER = "SpnegoProxyUserFilter";
   public static final String NO_CACHE_FILTER = "NoCacheFilter";
   public static final String APP_DIR = "webapps";
 
@@ -1026,7 +1032,18 @@ public class HttpServer implements FilterContainer {
   + "to enable SPNEGO/Kerberos authentication for the Web UI");
 }
 
-    addGlobalFilter(SPNEGO_FILTER, AuthenticationFilter.class.getName(), params);
+    if (conf.getBoolean(HTTP_SPNEGO_AUTHENTICATION_PROXYUSER_ENABLE_KEY,
+        HTTP_SPNEGO_AUTHENTICATION_PROXYUSER_ENABLE_DEFAULT)) {
+      // Copy/rename standard hadoop proxyuser settings to filter
+      for (Map.Entry<String, String> proxyEntry :
+          conf.getPropsWithPrefix(ProxyUsers.CONF_HADOOP_PROXYUSER).entrySet()) {
+        params.put(ProxyUserAuthenticationFilter.PROXYUSER_PREFIX + proxyEntry.getKey(),
+            proxyEntry.getValue());
+      }
+      addGlobalFilter(SPNEGO_PROXYUSER_FILTER, ProxyUserAuthenticationFilter.class.getName(),
+          params);
+    } else {
+      addGlobalFilter(SPNEGO_FILTER, AuthenticationFilter.class.getName(), params);
+    }
   }
 
   /**
diff --git a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/ProxyUserAuthenticationFilter.java b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/ProxyUserAuthenticationFilter.java
new file mode 100644
index 000..5fb17c9
--- /dev/null
+++ b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/ProxyUserAuthenticationFilter.java
@@ -0,0 +1,219 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Versio

[hbase] branch branch-2.2 updated: HBASE-24252 Implement proxyuser/doAs mechanism for hbase-http

2020-04-27 Thread elserj
This is an automated email from the ASF dual-hosted git repository.

elserj pushed a commit to branch branch-2.2
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2.2 by this push:
 new 046c80c  HBASE-24252 Implement proxyuser/doAs mechanism for hbase-http
046c80c is described below

commit 046c80c2ff09fa82f01f983600a0498d35228134
Author: Josh Elser 
AuthorDate: Mon Apr 27 13:05:53 2020 -0400

HBASE-24252 Implement proxyuser/doAs mechanism for hbase-http

copy ProxyUserAuthenticationFilter from Hadoop
add hbase.security.authentication.spnego.kerberos.proxyuser.enable parameter (default false)
wire ProxyUserAuthenticationFilter into HttpServer

Signed-off-by: Sean Busbey 
Signed-off-by: Josh Elser 
---
 .../org/apache/hadoop/hbase/http/HttpServer.java   |  19 +-
 .../hbase/http/ProxyUserAuthenticationFilter.java  | 219 +++
 .../hbase/http/HttpServerFunctionalTest.java   |  11 +
 .../hbase/http/TestProxyUserSpnegoHttpServer.java  | 295 +
 4 files changed, 543 insertions(+), 1 deletion(-)

diff --git a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/HttpServer.java b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/HttpServer.java
index 5ed641f..788fcfb 100644
--- a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/HttpServer.java
+++ b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/HttpServer.java
@@ -61,6 +61,7 @@ import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.authentication.server.AuthenticationFilter;
 import org.apache.hadoop.security.authorize.AccessControlList;
+import org.apache.hadoop.security.authorize.ProxyUsers;
 import org.apache.hadoop.util.Shell;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.yetus.audience.InterfaceStability;
@@ -127,6 +128,10 @@ public class HttpServer implements FilterContainer {
   static final String HTTP_SPNEGO_AUTHENTICATION_KRB_NAME_SUFFIX = "kerberos.name.rules";
   public static final String HTTP_SPNEGO_AUTHENTICATION_KRB_NAME_KEY =
       HTTP_SPNEGO_AUTHENTICATION_PREFIX + HTTP_SPNEGO_AUTHENTICATION_KRB_NAME_SUFFIX;
+  static final String HTTP_SPNEGO_AUTHENTICATION_PROXYUSER_ENABLE_SUFFIX = "kerberos.proxyuser.enable";
+  public static final String HTTP_SPNEGO_AUTHENTICATION_PROXYUSER_ENABLE_KEY =
+      HTTP_SPNEGO_AUTHENTICATION_PREFIX + HTTP_SPNEGO_AUTHENTICATION_PROXYUSER_ENABLE_SUFFIX;
+  public static final boolean HTTP_SPNEGO_AUTHENTICATION_PROXYUSER_ENABLE_DEFAULT = false;
   static final String HTTP_AUTHENTICATION_SIGNATURE_SECRET_FILE_SUFFIX =
   "signature.secret.file";
   public static final String HTTP_AUTHENTICATION_SIGNATURE_SECRET_FILE_KEY =
@@ -145,6 +150,7 @@ public class HttpServer implements FilterContainer {
   public static final String ADMINS_ACL = "admins.acl";
   public static final String BIND_ADDRESS = "bind.address";
   public static final String SPNEGO_FILTER = "SpnegoFilter";
+  public static final String SPNEGO_PROXYUSER_FILTER = "SpnegoProxyUserFilter";
   public static final String NO_CACHE_FILTER = "NoCacheFilter";
   public static final String APP_DIR = "webapps";
 
@@ -989,7 +995,18 @@ public class HttpServer implements FilterContainer {
   + "to enable SPNEGO/Kerberos authentication for the Web UI");
 }
 
-    addGlobalFilter(SPNEGO_FILTER, AuthenticationFilter.class.getName(), params);
+    if (conf.getBoolean(HTTP_SPNEGO_AUTHENTICATION_PROXYUSER_ENABLE_KEY,
+        HTTP_SPNEGO_AUTHENTICATION_PROXYUSER_ENABLE_DEFAULT)) {
+      // Copy/rename standard hadoop proxyuser settings to filter
+      for (Map.Entry<String, String> proxyEntry :
+          conf.getPropsWithPrefix(ProxyUsers.CONF_HADOOP_PROXYUSER).entrySet()) {
+        params.put(ProxyUserAuthenticationFilter.PROXYUSER_PREFIX + proxyEntry.getKey(),
+            proxyEntry.getValue());
+      }
+      addGlobalFilter(SPNEGO_PROXYUSER_FILTER, ProxyUserAuthenticationFilter.class.getName(),
+          params);
+    } else {
+      addGlobalFilter(SPNEGO_FILTER, AuthenticationFilter.class.getName(), params);
+    }
   }
 
   /**
diff --git a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/ProxyUserAuthenticationFilter.java b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/ProxyUserAuthenticationFilter.java
new file mode 100644
index 000..5fb17c9
--- /dev/null
+++ b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/ProxyUserAuthenticationFilter.java
@@ -0,0 +1,219 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Versio

[hbase] branch master updated: HBASE-24252 Implement proxyuser/doAs mechanism for hbase-http

2020-04-27 Thread elserj
This is an automated email from the ASF dual-hosted git repository.

elserj pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/master by this push:
 new 6eb5caf  HBASE-24252 Implement proxyuser/doAs mechanism for hbase-http
6eb5caf is described below

commit 6eb5cafe34db7fd092566c98daaf24740d10b89d
Author: Josh Elser 
AuthorDate: Mon Apr 27 13:05:53 2020 -0400

HBASE-24252 Implement proxyuser/doAs mechanism for hbase-http

copy ProxyUserAuthenticationFilter from Hadoop
add hbase.security.authentication.spnego.kerberos.proxyuser.enable parameter (default false)
wire ProxyUserAuthenticationFilter into HttpServer

Closes #1576

Signed-off-by: Sean Busbey 
Signed-off-by: Josh Elser 
---
 .../org/apache/hadoop/hbase/http/HttpServer.java   |  19 +-
 .../hbase/http/ProxyUserAuthenticationFilter.java  | 219 +++
 .../hbase/http/HttpServerFunctionalTest.java   |  11 +
 .../hbase/http/TestProxyUserSpnegoHttpServer.java  | 295 +
 4 files changed, 543 insertions(+), 1 deletion(-)

diff --git a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/HttpServer.java b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/HttpServer.java
index 887380e..d3176ca 100644
--- a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/HttpServer.java
+++ b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/HttpServer.java
@@ -61,6 +61,7 @@ import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.authentication.server.AuthenticationFilter;
 import org.apache.hadoop.security.authorize.AccessControlList;
+import org.apache.hadoop.security.authorize.ProxyUsers;
 import org.apache.hadoop.util.Shell;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.yetus.audience.InterfaceStability;
@@ -127,6 +128,10 @@ public class HttpServer implements FilterContainer {
   static final String HTTP_SPNEGO_AUTHENTICATION_KRB_NAME_SUFFIX = "kerberos.name.rules";
   public static final String HTTP_SPNEGO_AUTHENTICATION_KRB_NAME_KEY =
       HTTP_SPNEGO_AUTHENTICATION_PREFIX + HTTP_SPNEGO_AUTHENTICATION_KRB_NAME_SUFFIX;
+  static final String HTTP_SPNEGO_AUTHENTICATION_PROXYUSER_ENABLE_SUFFIX = "kerberos.proxyuser.enable";
+  public static final String HTTP_SPNEGO_AUTHENTICATION_PROXYUSER_ENABLE_KEY =
+      HTTP_SPNEGO_AUTHENTICATION_PREFIX + HTTP_SPNEGO_AUTHENTICATION_PROXYUSER_ENABLE_SUFFIX;
+  public static final boolean HTTP_SPNEGO_AUTHENTICATION_PROXYUSER_ENABLE_DEFAULT = false;
   static final String HTTP_AUTHENTICATION_SIGNATURE_SECRET_FILE_SUFFIX =
   "signature.secret.file";
   public static final String HTTP_AUTHENTICATION_SIGNATURE_SECRET_FILE_KEY =
@@ -145,6 +150,7 @@ public class HttpServer implements FilterContainer {
   public static final String ADMINS_ACL = "admins.acl";
   public static final String BIND_ADDRESS = "bind.address";
   public static final String SPNEGO_FILTER = "SpnegoFilter";
+  public static final String SPNEGO_PROXYUSER_FILTER = "SpnegoProxyUserFilter";
   public static final String NO_CACHE_FILTER = "NoCacheFilter";
   public static final String APP_DIR = "webapps";
 
@@ -1026,7 +1032,18 @@ public class HttpServer implements FilterContainer {
   + "to enable SPNEGO/Kerberos authentication for the Web UI");
 }
 
-    addGlobalFilter(SPNEGO_FILTER, AuthenticationFilter.class.getName(), params);
+    if (conf.getBoolean(HTTP_SPNEGO_AUTHENTICATION_PROXYUSER_ENABLE_KEY,
+        HTTP_SPNEGO_AUTHENTICATION_PROXYUSER_ENABLE_DEFAULT)) {
+      // Copy/rename standard hadoop proxyuser settings to filter
+      for (Map.Entry<String, String> proxyEntry :
+          conf.getPropsWithPrefix(ProxyUsers.CONF_HADOOP_PROXYUSER).entrySet()) {
+        params.put(ProxyUserAuthenticationFilter.PROXYUSER_PREFIX + proxyEntry.getKey(),
+            proxyEntry.getValue());
+      }
+      addGlobalFilter(SPNEGO_PROXYUSER_FILTER, ProxyUserAuthenticationFilter.class.getName(),
+          params);
+    } else {
+      addGlobalFilter(SPNEGO_FILTER, AuthenticationFilter.class.getName(), params);
+    }
   }
 
   /**
diff --git a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/ProxyUserAuthenticationFilter.java b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/ProxyUserAuthenticationFilter.java
new file mode 100644
index 000..5fb17c9
--- /dev/null
+++ b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/ProxyUserAuthenticationFilter.java
@@ -0,0 +1,219 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under
