[hbase] branch branch-2.3 updated: HBASE-24190 : Make kerberos value of hbase.security.authentication property case insensitive (#1687)
This is an automated email from the ASF dual-hosted git repository. vjasani pushed a commit to branch branch-2.3 in repository https://gitbox.apache.org/repos/asf/hbase.git The following commit(s) were added to refs/heads/branch-2.3 by this push: new 1620e6d HBASE-24190 : Make kerberos value of hbase.security.authentication property case insensitive (#1687) 1620e6d is described below commit 1620e6d958c4c425ed5903bc1e6a16222d58a870 Author: shahrs87 AuthorDate: Sun May 10 22:24:45 2020 -0700 HBASE-24190 : Make kerberos value of hbase.security.authentication property case insensitive (#1687) Signed-off-by: binlijin Signed-off-by: Viraj Jasani --- .../org/apache/hadoop/hbase/TestHBaseConfiguration.java | 15 +++ .../java/org/apache/hadoop/hbase/http/InfoServer.java | 2 +- .../java/org/apache/hadoop/hbase/mapreduce/SyncTable.java | 4 ++-- 3 files changed, 18 insertions(+), 3 deletions(-) diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/TestHBaseConfiguration.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/TestHBaseConfiguration.java index 184cdf6..6a0b428 100644 --- a/hbase-common/src/test/java/org/apache/hadoop/hbase/TestHBaseConfiguration.java +++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/TestHBaseConfiguration.java @@ -27,9 +27,11 @@ import java.lang.reflect.InvocationTargetException; import java.lang.reflect.Method; import java.util.List; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.hbase.testclassification.MiscTests; import org.apache.hadoop.hbase.testclassification.SmallTests; import org.junit.AfterClass; +import org.junit.Assert; import org.junit.ClassRule; import org.junit.Test; import org.junit.experimental.categories.Category; @@ -101,6 +103,19 @@ public class TestHBaseConfiguration { } } + @Test + public void testSecurityConfCaseInsensitive() { +Configuration conf = HBaseConfiguration.create(); +conf.set("hbase.security.authentication", "kerberos"); 
+Assert.assertTrue(User.isHBaseSecurityEnabled(conf)); + +conf.set("hbase.security.authentication", "KERBEROS"); +Assert.assertTrue(User.isHBaseSecurityEnabled(conf)); + +conf.set("hbase.security.authentication", "KERBeros"); +Assert.assertTrue(User.isHBaseSecurityEnabled(conf)); + } + private static class ReflectiveCredentialProviderClient { public static final String HADOOP_CRED_PROVIDER_FACTORY_CLASS_NAME = "org.apache.hadoop.security.alias.JavaKeyStoreProvider$Factory"; diff --git a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/InfoServer.java b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/InfoServer.java index 6ee37cb..6a1ffbc 100644 --- a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/InfoServer.java +++ b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/InfoServer.java @@ -173,7 +173,7 @@ public class InfoServer { return false; } String remoteUser = req.getRemoteUser(); -if ("kerberos".equals(conf.get(HttpServer.HTTP_UI_AUTHENTICATION)) && +if ("kerberos".equalsIgnoreCase(conf.get(HttpServer.HTTP_UI_AUTHENTICATION)) && conf.getBoolean(CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, false) && remoteUser != null) { return HttpServer.userHasAdministratorAccess(ctx, remoteUser); diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/SyncTable.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/SyncTable.java index e092f90..cf9241d 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/SyncTable.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/SyncTable.java @@ -91,7 +91,7 @@ public class SyncTable extends Configured implements Tool { private void initCredentialsForHBase(String zookeeper, Job job) throws IOException { Configuration peerConf = HBaseConfiguration.createClusterConf(job .getConfiguration(), zookeeper); -if(peerConf.get("hbase.security.authentication").equals("kerberos")){ + 
if("kerberos".equalsIgnoreCase(peerConf.get("hbase.security.authentication"))){ TableMapReduceUtil.initCredentialsForCluster(job, peerConf); } } @@ -105,7 +105,7 @@ public class SyncTable extends Configured implements Tool { Job job = Job.getInstance(getConf(),getConf().get("mapreduce.job.name", "syncTable_" + sourceTableName + "-" + targetTableName)); Configuration jobConf = job.getConfiguration(); -if (jobConf.get("hadoop.security.authentication").equals("kerberos")) { +if ("kerberos".equalsIgnoreCase(jobConf.get("hadoop.security.authentication"))) { TokenCache.obtainTokensForNamenodes(job.getCredentials(), new Path[] { sourceHashDir }, getConf()); }
[hbase] branch branch-2.3 updated (87b8bdf -> b44cf90)
This is an automated email from the ASF dual-hosted git repository. vjasani pushed a change to branch branch-2.3 in repository https://gitbox.apache.org/repos/asf/hbase.git. from 87b8bdf HBASE-24164 Retain the ReadRequests and WriteRequests of region on we… (#1500) add b44cf90 Revert "Make kerberos value of hbase.security.authentication property case insensitive (#1687)" No new revisions were added by this update. Summary of changes: .../org/apache/hadoop/hbase/TestHBaseConfiguration.java | 15 --- .../java/org/apache/hadoop/hbase/http/InfoServer.java | 2 +- .../java/org/apache/hadoop/hbase/mapreduce/SyncTable.java | 4 ++-- 3 files changed, 3 insertions(+), 18 deletions(-)
[hbase] branch branch-2.1 updated: HBASE-24190 : Make kerberos value of hbase.security.authentication property case insensitive (#1687)
This is an automated email from the ASF dual-hosted git repository. vjasani pushed a commit to branch branch-2.1 in repository https://gitbox.apache.org/repos/asf/hbase.git The following commit(s) were added to refs/heads/branch-2.1 by this push: new 7662d68 HBASE-24190 : Make kerberos value of hbase.security.authentication property case insensitive (#1687) 7662d68 is described below commit 7662d68941d0e454ca36477c9993a1d36a14e1f8 Author: shahrs87 AuthorDate: Sun May 10 22:24:45 2020 -0700 HBASE-24190 : Make kerberos value of hbase.security.authentication property case insensitive (#1687) Signed-off-by: binlijin Signed-off-by: Viraj Jasani --- .../org/apache/hadoop/hbase/TestHBaseConfiguration.java | 15 +++ .../java/org/apache/hadoop/hbase/http/InfoServer.java | 2 +- .../java/org/apache/hadoop/hbase/mapreduce/SyncTable.java | 4 ++-- 3 files changed, 18 insertions(+), 3 deletions(-) diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/TestHBaseConfiguration.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/TestHBaseConfiguration.java index 184cdf6..6a0b428 100644 --- a/hbase-common/src/test/java/org/apache/hadoop/hbase/TestHBaseConfiguration.java +++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/TestHBaseConfiguration.java @@ -27,9 +27,11 @@ import java.lang.reflect.InvocationTargetException; import java.lang.reflect.Method; import java.util.List; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.hbase.testclassification.MiscTests; import org.apache.hadoop.hbase.testclassification.SmallTests; import org.junit.AfterClass; +import org.junit.Assert; import org.junit.ClassRule; import org.junit.Test; import org.junit.experimental.categories.Category; @@ -101,6 +103,19 @@ public class TestHBaseConfiguration { } } + @Test + public void testSecurityConfCaseInsensitive() { +Configuration conf = HBaseConfiguration.create(); +conf.set("hbase.security.authentication", "kerberos"); 
+Assert.assertTrue(User.isHBaseSecurityEnabled(conf)); + +conf.set("hbase.security.authentication", "KERBEROS"); +Assert.assertTrue(User.isHBaseSecurityEnabled(conf)); + +conf.set("hbase.security.authentication", "KERBeros"); +Assert.assertTrue(User.isHBaseSecurityEnabled(conf)); + } + private static class ReflectiveCredentialProviderClient { public static final String HADOOP_CRED_PROVIDER_FACTORY_CLASS_NAME = "org.apache.hadoop.security.alias.JavaKeyStoreProvider$Factory"; diff --git a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/InfoServer.java b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/InfoServer.java index e50639a..586a3f3 100644 --- a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/InfoServer.java +++ b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/InfoServer.java @@ -171,7 +171,7 @@ public class InfoServer { return false; } String remoteUser = req.getRemoteUser(); -if ("kerberos".equals(conf.get(HttpServer.HTTP_UI_AUTHENTICATION)) && +if ("kerberos".equalsIgnoreCase(conf.get(HttpServer.HTTP_UI_AUTHENTICATION)) && conf.getBoolean(CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, false) && remoteUser != null) { return HttpServer.userHasAdministratorAccess(ctx, remoteUser); diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/SyncTable.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/SyncTable.java index 1bb9969..bc85e9c 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/SyncTable.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/SyncTable.java @@ -87,7 +87,7 @@ public class SyncTable extends Configured implements Tool { private void initCredentialsForHBase(String zookeeper, Job job) throws IOException { Configuration peerConf = HBaseConfiguration.createClusterConf(job .getConfiguration(), zookeeper); -if(peerConf.get("hbase.security.authentication").equals("kerberos")){ + 
if("kerberos".equalsIgnoreCase(peerConf.get("hbase.security.authentication"))){ TableMapReduceUtil.initCredentialsForCluster(job, peerConf); } } @@ -101,7 +101,7 @@ public class SyncTable extends Configured implements Tool { Job job = Job.getInstance(getConf(),getConf().get("mapreduce.job.name", "syncTable_" + sourceTableName + "-" + targetTableName)); Configuration jobConf = job.getConfiguration(); -if (jobConf.get("hadoop.security.authentication").equals("kerberos")) { +if ("kerberos".equalsIgnoreCase(jobConf.get("hadoop.security.authentication"))) { TokenCache.obtainTokensForNamenodes(job.getCredentials(), new Path[] { sourceHashDir }, getConf()); }
[hbase] branch branch-2.1 updated: Revert "Make kerberos value of hbase.security.authentication property case insensitive (#1687)"
This is an automated email from the ASF dual-hosted git repository. vjasani pushed a commit to branch branch-2.1 in repository https://gitbox.apache.org/repos/asf/hbase.git The following commit(s) were added to refs/heads/branch-2.1 by this push: new 30ab966 Revert "Make kerberos value of hbase.security.authentication property case insensitive (#1687)" 30ab966 is described below commit 30ab9665068ba85ddfabf0d4e21f4da28d24404e Author: Viraj Jasani AuthorDate: Thu May 14 13:33:02 2020 +0530 Revert "Make kerberos value of hbase.security.authentication property case insensitive (#1687)" This reverts commit 6f36c79c2fd0aadb204aed5a8f2459edfe153907. --- .../org/apache/hadoop/hbase/TestHBaseConfiguration.java | 15 --- .../java/org/apache/hadoop/hbase/http/InfoServer.java | 2 +- .../java/org/apache/hadoop/hbase/mapreduce/SyncTable.java | 4 ++-- 3 files changed, 3 insertions(+), 18 deletions(-) diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/TestHBaseConfiguration.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/TestHBaseConfiguration.java index 6a0b428..184cdf6 100644 --- a/hbase-common/src/test/java/org/apache/hadoop/hbase/TestHBaseConfiguration.java +++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/TestHBaseConfiguration.java @@ -27,11 +27,9 @@ import java.lang.reflect.InvocationTargetException; import java.lang.reflect.Method; import java.util.List; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.hbase.testclassification.MiscTests; import org.apache.hadoop.hbase.testclassification.SmallTests; import org.junit.AfterClass; -import org.junit.Assert; import org.junit.ClassRule; import org.junit.Test; import org.junit.experimental.categories.Category; @@ -103,19 +101,6 @@ public class TestHBaseConfiguration { } } - @Test - public void testSecurityConfCaseInsensitive() { -Configuration conf = HBaseConfiguration.create(); -conf.set("hbase.security.authentication", "kerberos"); 
-Assert.assertTrue(User.isHBaseSecurityEnabled(conf)); - -conf.set("hbase.security.authentication", "KERBEROS"); -Assert.assertTrue(User.isHBaseSecurityEnabled(conf)); - -conf.set("hbase.security.authentication", "KERBeros"); -Assert.assertTrue(User.isHBaseSecurityEnabled(conf)); - } - private static class ReflectiveCredentialProviderClient { public static final String HADOOP_CRED_PROVIDER_FACTORY_CLASS_NAME = "org.apache.hadoop.security.alias.JavaKeyStoreProvider$Factory"; diff --git a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/InfoServer.java b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/InfoServer.java index 586a3f3..e50639a 100644 --- a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/InfoServer.java +++ b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/InfoServer.java @@ -171,7 +171,7 @@ public class InfoServer { return false; } String remoteUser = req.getRemoteUser(); -if ("kerberos".equalsIgnoreCase(conf.get(HttpServer.HTTP_UI_AUTHENTICATION)) && +if ("kerberos".equals(conf.get(HttpServer.HTTP_UI_AUTHENTICATION)) && conf.getBoolean(CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, false) && remoteUser != null) { return HttpServer.userHasAdministratorAccess(ctx, remoteUser); diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/SyncTable.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/SyncTable.java index bc85e9c..1bb9969 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/SyncTable.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/SyncTable.java @@ -87,7 +87,7 @@ public class SyncTable extends Configured implements Tool { private void initCredentialsForHBase(String zookeeper, Job job) throws IOException { Configuration peerConf = HBaseConfiguration.createClusterConf(job .getConfiguration(), zookeeper); - if("kerberos".equalsIgnoreCase(peerConf.get("hbase.security.authentication"))){ 
+if(peerConf.get("hbase.security.authentication").equals("kerberos")){ TableMapReduceUtil.initCredentialsForCluster(job, peerConf); } } @@ -101,7 +101,7 @@ public class SyncTable extends Configured implements Tool { Job job = Job.getInstance(getConf(),getConf().get("mapreduce.job.name", "syncTable_" + sourceTableName + "-" + targetTableName)); Configuration jobConf = job.getConfiguration(); -if ("kerberos".equalsIgnoreCase(jobConf.get("hadoop.security.authentication"))) { +if (jobConf.get("hadoop.security.authentication").equals("kerberos")) { TokenCache.obtainTokensForNamenodes(job.getCredentials(), new Path[] { sourceHashDir }, getConf()); }
[hbase] branch branch-2.2 updated: HBASE-24190 : Make kerberos value of hbase.security.authentication property case insensitive (#1687)
This is an automated email from the ASF dual-hosted git repository. vjasani pushed a commit to branch branch-2.2 in repository https://gitbox.apache.org/repos/asf/hbase.git The following commit(s) were added to refs/heads/branch-2.2 by this push: new 1ea35aa HBASE-24190 : Make kerberos value of hbase.security.authentication property case insensitive (#1687) 1ea35aa is described below commit 1ea35aac3ac7dc4a03dcc7010cc171501195dcdd Author: shahrs87 AuthorDate: Sun May 10 22:24:45 2020 -0700 HBASE-24190 : Make kerberos value of hbase.security.authentication property case insensitive (#1687) Signed-off-by: binlijin Signed-off-by: Viraj Jasani --- .../org/apache/hadoop/hbase/TestHBaseConfiguration.java | 15 +++ .../java/org/apache/hadoop/hbase/http/InfoServer.java | 2 +- .../java/org/apache/hadoop/hbase/mapreduce/SyncTable.java | 4 ++-- 3 files changed, 18 insertions(+), 3 deletions(-) diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/TestHBaseConfiguration.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/TestHBaseConfiguration.java index 184cdf6..6a0b428 100644 --- a/hbase-common/src/test/java/org/apache/hadoop/hbase/TestHBaseConfiguration.java +++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/TestHBaseConfiguration.java @@ -27,9 +27,11 @@ import java.lang.reflect.InvocationTargetException; import java.lang.reflect.Method; import java.util.List; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.hbase.testclassification.MiscTests; import org.apache.hadoop.hbase.testclassification.SmallTests; import org.junit.AfterClass; +import org.junit.Assert; import org.junit.ClassRule; import org.junit.Test; import org.junit.experimental.categories.Category; @@ -101,6 +103,19 @@ public class TestHBaseConfiguration { } } + @Test + public void testSecurityConfCaseInsensitive() { +Configuration conf = HBaseConfiguration.create(); +conf.set("hbase.security.authentication", "kerberos"); 
+Assert.assertTrue(User.isHBaseSecurityEnabled(conf)); + +conf.set("hbase.security.authentication", "KERBEROS"); +Assert.assertTrue(User.isHBaseSecurityEnabled(conf)); + +conf.set("hbase.security.authentication", "KERBeros"); +Assert.assertTrue(User.isHBaseSecurityEnabled(conf)); + } + private static class ReflectiveCredentialProviderClient { public static final String HADOOP_CRED_PROVIDER_FACTORY_CLASS_NAME = "org.apache.hadoop.security.alias.JavaKeyStoreProvider$Factory"; diff --git a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/InfoServer.java b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/InfoServer.java index e50639a..586a3f3 100644 --- a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/InfoServer.java +++ b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/InfoServer.java @@ -171,7 +171,7 @@ public class InfoServer { return false; } String remoteUser = req.getRemoteUser(); -if ("kerberos".equals(conf.get(HttpServer.HTTP_UI_AUTHENTICATION)) && +if ("kerberos".equalsIgnoreCase(conf.get(HttpServer.HTTP_UI_AUTHENTICATION)) && conf.getBoolean(CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, false) && remoteUser != null) { return HttpServer.userHasAdministratorAccess(ctx, remoteUser); diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/SyncTable.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/SyncTable.java index e092f90..cf9241d 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/SyncTable.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/SyncTable.java @@ -91,7 +91,7 @@ public class SyncTable extends Configured implements Tool { private void initCredentialsForHBase(String zookeeper, Job job) throws IOException { Configuration peerConf = HBaseConfiguration.createClusterConf(job .getConfiguration(), zookeeper); -if(peerConf.get("hbase.security.authentication").equals("kerberos")){ + 
if("kerberos".equalsIgnoreCase(peerConf.get("hbase.security.authentication"))){ TableMapReduceUtil.initCredentialsForCluster(job, peerConf); } } @@ -105,7 +105,7 @@ public class SyncTable extends Configured implements Tool { Job job = Job.getInstance(getConf(),getConf().get("mapreduce.job.name", "syncTable_" + sourceTableName + "-" + targetTableName)); Configuration jobConf = job.getConfiguration(); -if (jobConf.get("hadoop.security.authentication").equals("kerberos")) { +if ("kerberos".equalsIgnoreCase(jobConf.get("hadoop.security.authentication"))) { TokenCache.obtainTokensForNamenodes(job.getCredentials(), new Path[] { sourceHashDir }, getConf()); }
[hbase] branch branch-2 updated: Revert "Make kerberos value of hbase.security.authentication property case insensitive (#1687)"
This is an automated email from the ASF dual-hosted git repository. vjasani pushed a commit to branch branch-2 in repository https://gitbox.apache.org/repos/asf/hbase.git The following commit(s) were added to refs/heads/branch-2 by this push: new b78f436 Revert "Make kerberos value of hbase.security.authentication property case insensitive (#1687)" b78f436 is described below commit b78f4367f710a8cb2b3df37ba158604e530301dc Author: Viraj Jasani AuthorDate: Thu May 14 11:45:23 2020 +0530 Revert "Make kerberos value of hbase.security.authentication property case insensitive (#1687)" This reverts commit c03ec837e70ebf014aabd8610d5fe4d53b239efa. --- .../org/apache/hadoop/hbase/TestHBaseConfiguration.java | 15 --- .../java/org/apache/hadoop/hbase/http/InfoServer.java | 2 +- .../java/org/apache/hadoop/hbase/mapreduce/SyncTable.java | 4 ++-- 3 files changed, 3 insertions(+), 18 deletions(-) diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/TestHBaseConfiguration.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/TestHBaseConfiguration.java index 6a0b428..184cdf6 100644 --- a/hbase-common/src/test/java/org/apache/hadoop/hbase/TestHBaseConfiguration.java +++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/TestHBaseConfiguration.java @@ -27,11 +27,9 @@ import java.lang.reflect.InvocationTargetException; import java.lang.reflect.Method; import java.util.List; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.hbase.testclassification.MiscTests; import org.apache.hadoop.hbase.testclassification.SmallTests; import org.junit.AfterClass; -import org.junit.Assert; import org.junit.ClassRule; import org.junit.Test; import org.junit.experimental.categories.Category; @@ -103,19 +101,6 @@ public class TestHBaseConfiguration { } } - @Test - public void testSecurityConfCaseInsensitive() { -Configuration conf = HBaseConfiguration.create(); -conf.set("hbase.security.authentication", "kerberos"); 
-Assert.assertTrue(User.isHBaseSecurityEnabled(conf)); - -conf.set("hbase.security.authentication", "KERBEROS"); -Assert.assertTrue(User.isHBaseSecurityEnabled(conf)); - -conf.set("hbase.security.authentication", "KERBeros"); -Assert.assertTrue(User.isHBaseSecurityEnabled(conf)); - } - private static class ReflectiveCredentialProviderClient { public static final String HADOOP_CRED_PROVIDER_FACTORY_CLASS_NAME = "org.apache.hadoop.security.alias.JavaKeyStoreProvider$Factory"; diff --git a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/InfoServer.java b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/InfoServer.java index 6a1ffbc..6ee37cb 100644 --- a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/InfoServer.java +++ b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/InfoServer.java @@ -173,7 +173,7 @@ public class InfoServer { return false; } String remoteUser = req.getRemoteUser(); -if ("kerberos".equalsIgnoreCase(conf.get(HttpServer.HTTP_UI_AUTHENTICATION)) && +if ("kerberos".equals(conf.get(HttpServer.HTTP_UI_AUTHENTICATION)) && conf.getBoolean(CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, false) && remoteUser != null) { return HttpServer.userHasAdministratorAccess(ctx, remoteUser); diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/SyncTable.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/SyncTable.java index cf9241d..e092f90 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/SyncTable.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/SyncTable.java @@ -91,7 +91,7 @@ public class SyncTable extends Configured implements Tool { private void initCredentialsForHBase(String zookeeper, Job job) throws IOException { Configuration peerConf = HBaseConfiguration.createClusterConf(job .getConfiguration(), zookeeper); - if("kerberos".equalsIgnoreCase(peerConf.get("hbase.security.authentication"))){ 
+if(peerConf.get("hbase.security.authentication").equals("kerberos")){ TableMapReduceUtil.initCredentialsForCluster(job, peerConf); } } @@ -105,7 +105,7 @@ public class SyncTable extends Configured implements Tool { Job job = Job.getInstance(getConf(),getConf().get("mapreduce.job.name", "syncTable_" + sourceTableName + "-" + targetTableName)); Configuration jobConf = job.getConfiguration(); -if ("kerberos".equalsIgnoreCase(jobConf.get("hadoop.security.authentication"))) { +if (jobConf.get("hadoop.security.authentication").equals("kerberos")) { TokenCache.obtainTokensForNamenodes(job.getCredentials(), new Path[] { sourceHashDir }, getConf()); }
[hbase] branch branch-2 updated: HBASE-24190 : Make kerberos value of hbase.security.authentication property case insensitive (#1687)
This is an automated email from the ASF dual-hosted git repository. vjasani pushed a commit to branch branch-2 in repository https://gitbox.apache.org/repos/asf/hbase.git The following commit(s) were added to refs/heads/branch-2 by this push: new 941cef7 HBASE-24190 : Make kerberos value of hbase.security.authentication property case insensitive (#1687) 941cef7 is described below commit 941cef71c269e06598ed5957eec93310689f68b9 Author: shahrs87 AuthorDate: Sun May 10 22:24:45 2020 -0700 HBASE-24190 : Make kerberos value of hbase.security.authentication property case insensitive (#1687) Signed-off-by: binlijin Signed-off-by: Viraj Jasani --- .../org/apache/hadoop/hbase/TestHBaseConfiguration.java | 15 +++ .../java/org/apache/hadoop/hbase/http/InfoServer.java | 2 +- .../java/org/apache/hadoop/hbase/mapreduce/SyncTable.java | 4 ++-- 3 files changed, 18 insertions(+), 3 deletions(-) diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/TestHBaseConfiguration.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/TestHBaseConfiguration.java index 184cdf6..6a0b428 100644 --- a/hbase-common/src/test/java/org/apache/hadoop/hbase/TestHBaseConfiguration.java +++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/TestHBaseConfiguration.java @@ -27,9 +27,11 @@ import java.lang.reflect.InvocationTargetException; import java.lang.reflect.Method; import java.util.List; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.hbase.testclassification.MiscTests; import org.apache.hadoop.hbase.testclassification.SmallTests; import org.junit.AfterClass; +import org.junit.Assert; import org.junit.ClassRule; import org.junit.Test; import org.junit.experimental.categories.Category; @@ -101,6 +103,19 @@ public class TestHBaseConfiguration { } } + @Test + public void testSecurityConfCaseInsensitive() { +Configuration conf = HBaseConfiguration.create(); +conf.set("hbase.security.authentication", "kerberos"); 
+Assert.assertTrue(User.isHBaseSecurityEnabled(conf)); + +conf.set("hbase.security.authentication", "KERBEROS"); +Assert.assertTrue(User.isHBaseSecurityEnabled(conf)); + +conf.set("hbase.security.authentication", "KERBeros"); +Assert.assertTrue(User.isHBaseSecurityEnabled(conf)); + } + private static class ReflectiveCredentialProviderClient { public static final String HADOOP_CRED_PROVIDER_FACTORY_CLASS_NAME = "org.apache.hadoop.security.alias.JavaKeyStoreProvider$Factory"; diff --git a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/InfoServer.java b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/InfoServer.java index 6ee37cb..6a1ffbc 100644 --- a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/InfoServer.java +++ b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/InfoServer.java @@ -173,7 +173,7 @@ public class InfoServer { return false; } String remoteUser = req.getRemoteUser(); -if ("kerberos".equals(conf.get(HttpServer.HTTP_UI_AUTHENTICATION)) && +if ("kerberos".equalsIgnoreCase(conf.get(HttpServer.HTTP_UI_AUTHENTICATION)) && conf.getBoolean(CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, false) && remoteUser != null) { return HttpServer.userHasAdministratorAccess(ctx, remoteUser); diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/SyncTable.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/SyncTable.java index e092f90..cf9241d 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/SyncTable.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/SyncTable.java @@ -91,7 +91,7 @@ public class SyncTable extends Configured implements Tool { private void initCredentialsForHBase(String zookeeper, Job job) throws IOException { Configuration peerConf = HBaseConfiguration.createClusterConf(job .getConfiguration(), zookeeper); -if(peerConf.get("hbase.security.authentication").equals("kerberos")){ + 
if("kerberos".equalsIgnoreCase(peerConf.get("hbase.security.authentication"))){ TableMapReduceUtil.initCredentialsForCluster(job, peerConf); } } @@ -105,7 +105,7 @@ public class SyncTable extends Configured implements Tool { Job job = Job.getInstance(getConf(),getConf().get("mapreduce.job.name", "syncTable_" + sourceTableName + "-" + targetTableName)); Configuration jobConf = job.getConfiguration(); -if (jobConf.get("hadoop.security.authentication").equals("kerberos")) { +if ("kerberos".equalsIgnoreCase(jobConf.get("hadoop.security.authentication"))) { TokenCache.obtainTokensForNamenodes(job.getCredentials(), new Path[] { sourceHashDir }, getConf()); }
[hbase] branch branch-2.2 updated: Revert "Make kerberos value of hbase.security.authentication property case insensitive (#1687)"
This is an automated email from the ASF dual-hosted git repository. vjasani pushed a commit to branch branch-2.2 in repository https://gitbox.apache.org/repos/asf/hbase.git The following commit(s) were added to refs/heads/branch-2.2 by this push: new 87ce2cf Revert "Make kerberos value of hbase.security.authentication property case insensitive (#1687)" 87ce2cf is described below commit 87ce2cff979df88eed3ac2e530068fe2506a6fb6 Author: Viraj Jasani AuthorDate: Thu May 14 13:30:52 2020 +0530 Revert "Make kerberos value of hbase.security.authentication property case insensitive (#1687)" This reverts commit 057d83cfafd8d659576869f1e71e3e75029fbad3. --- .../org/apache/hadoop/hbase/TestHBaseConfiguration.java | 15 --- .../java/org/apache/hadoop/hbase/http/InfoServer.java | 2 +- .../java/org/apache/hadoop/hbase/mapreduce/SyncTable.java | 4 ++-- 3 files changed, 3 insertions(+), 18 deletions(-) diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/TestHBaseConfiguration.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/TestHBaseConfiguration.java index 6a0b428..184cdf6 100644 --- a/hbase-common/src/test/java/org/apache/hadoop/hbase/TestHBaseConfiguration.java +++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/TestHBaseConfiguration.java @@ -27,11 +27,9 @@ import java.lang.reflect.InvocationTargetException; import java.lang.reflect.Method; import java.util.List; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.hbase.testclassification.MiscTests; import org.apache.hadoop.hbase.testclassification.SmallTests; import org.junit.AfterClass; -import org.junit.Assert; import org.junit.ClassRule; import org.junit.Test; import org.junit.experimental.categories.Category; @@ -103,19 +101,6 @@ public class TestHBaseConfiguration { } } - @Test - public void testSecurityConfCaseInsensitive() { -Configuration conf = HBaseConfiguration.create(); -conf.set("hbase.security.authentication", "kerberos"); 
-Assert.assertTrue(User.isHBaseSecurityEnabled(conf)); - -conf.set("hbase.security.authentication", "KERBEROS"); -Assert.assertTrue(User.isHBaseSecurityEnabled(conf)); - -conf.set("hbase.security.authentication", "KERBeros"); -Assert.assertTrue(User.isHBaseSecurityEnabled(conf)); - } - private static class ReflectiveCredentialProviderClient { public static final String HADOOP_CRED_PROVIDER_FACTORY_CLASS_NAME = "org.apache.hadoop.security.alias.JavaKeyStoreProvider$Factory"; diff --git a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/InfoServer.java b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/InfoServer.java index 586a3f3..e50639a 100644 --- a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/InfoServer.java +++ b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/InfoServer.java @@ -171,7 +171,7 @@ public class InfoServer { return false; } String remoteUser = req.getRemoteUser(); -if ("kerberos".equalsIgnoreCase(conf.get(HttpServer.HTTP_UI_AUTHENTICATION)) && +if ("kerberos".equals(conf.get(HttpServer.HTTP_UI_AUTHENTICATION)) && conf.getBoolean(CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, false) && remoteUser != null) { return HttpServer.userHasAdministratorAccess(ctx, remoteUser); diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/SyncTable.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/SyncTable.java index cf9241d..e092f90 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/SyncTable.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/SyncTable.java @@ -91,7 +91,7 @@ public class SyncTable extends Configured implements Tool { private void initCredentialsForHBase(String zookeeper, Job job) throws IOException { Configuration peerConf = HBaseConfiguration.createClusterConf(job .getConfiguration(), zookeeper); - if("kerberos".equalsIgnoreCase(peerConf.get("hbase.security.authentication"))){ 
+if(peerConf.get("hbase.security.authentication").equals("kerberos")){ TableMapReduceUtil.initCredentialsForCluster(job, peerConf); } } @@ -105,7 +105,7 @@ public class SyncTable extends Configured implements Tool { Job job = Job.getInstance(getConf(),getConf().get("mapreduce.job.name", "syncTable_" + sourceTableName + "-" + targetTableName)); Configuration jobConf = job.getConfiguration(); -if ("kerberos".equalsIgnoreCase(jobConf.get("hadoop.security.authentication"))) { +if (jobConf.get("hadoop.security.authentication").equals("kerberos")) { TokenCache.obtainTokensForNamenodes(job.getCredentials(), new Path[] { sourceHashDir }, getConf()); }
[hbase] 01/02: Revert "HBASE-24086 Disable output stream capability enforcement when running on LocalFileSystem"
This is an automated email from the ASF dual-hosted git repository. ndimiduk pushed a commit to branch branch-2.2 in repository https://gitbox.apache.org/repos/asf/hbase.git commit 894e0c025f6df4c96535074e262d354d343431c1 Author: Nick Dimiduk AuthorDate: Thu May 14 14:11:07 2020 -0700 Revert "HBASE-24086 Disable output stream capability enforcement when running on LocalFileSystem" This reverts commit 1d912790d5d122693ddd6589972c5599e2ae5cae. --- .../java/org/apache/hadoop/hbase/util/CommonFSUtils.java | 14 +++--- 1 file changed, 3 insertions(+), 11 deletions(-) diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CommonFSUtils.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CommonFSUtils.java index bed6551..7ed2a78 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CommonFSUtils.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CommonFSUtils.java @@ -1,4 +1,4 @@ -/* +/** * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file @@ -33,7 +33,6 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.LocalFileSystem; import org.apache.hadoop.fs.LocatedFileStatus; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.PathFilter; @@ -45,6 +44,7 @@ import org.apache.hadoop.ipc.RemoteException; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hbase.thirdparty.com.google.common.collect.Lists; @@ -352,19 +352,11 @@ public abstract class CommonFSUtils { public static FileSystem getWALFileSystem(final Configuration c) throws IOException { Path p = getWALRootDir(c); FileSystem fs = p.getFileSystem(c); -// hadoop-core does fs caching, so need to propagate this if set +// hadoop-core does fs caching, so need to propogate this if set String enforceStreamCapability = c.get(UNSAFE_STREAM_CAPABILITY_ENFORCE); if (enforceStreamCapability != null) { fs.getConf().set(UNSAFE_STREAM_CAPABILITY_ENFORCE, enforceStreamCapability); } -if (fs instanceof LocalFileSystem) { - // running on LocalFileSystem, which does not support the required capabilities `HSYNC` - // and `HFLUSH`. disable enforcement. - final boolean value = false; - LOG.warn("Cannot enforce durability guarantees while running on {}. Setting {}={} for" -+ " this FileSystem.", fs.getUri(), UNSAFE_STREAM_CAPABILITY_ENFORCE, value); - fs.getConf().setBoolean(UNSAFE_STREAM_CAPABILITY_ENFORCE, value); -} return fs; }
[hbase] 02/02: HBASE-24271 Set values in `conf/hbase-site.xml` that enable running on `LocalFileSystem` out of the box
This is an automated email from the ASF dual-hosted git repository. ndimiduk pushed a commit to branch branch-2.2 in repository https://gitbox.apache.org/repos/asf/hbase.git commit d240146a62cef71d74f456e8e682790a29fb7bfc Author: Nick Dimiduk AuthorDate: Mon Apr 27 13:07:29 2020 -0700 HBASE-24271 Set values in `conf/hbase-site.xml` that enable running on `LocalFileSystem` out of the box Simplify the new user experience shipping a configuration that enables a fresh checkout or tarball distribution to run in standalone mode without direct user configuration. This change restores the behavior we had when running on Hadoop 2.8 and earlier. Patch for master includes an update to the book. This change will be omitted when backporting to earlier branches. Signed-off-by: stack Signed-off-by: Josh Elser Signed-off-by: Duo Zhang --- .gitignore | 1 + conf/hbase-site.xml| 34 +- .../apache/hadoop/hbase/util/CommonFSUtils.java| 4 +- src/main/asciidoc/_chapters/getting_started.adoc | 77 ++ 4 files changed, 42 insertions(+), 74 deletions(-) diff --git a/.gitignore b/.gitignore index 0ae87d9..0b883e0 100644 --- a/.gitignore +++ b/.gitignore @@ -21,3 +21,4 @@ linklint/ .checkstyle **/.checkstyle .java-version +tmp diff --git a/conf/hbase-site.xml b/conf/hbase-site.xml index c516ac7..20c62f7 100644 --- a/conf/hbase-site.xml +++ b/conf/hbase-site.xml @@ -1,8 +1,7 @@ + + +hbase.cluster.distributed +false + + +hbase.tmp.dir +./tmp + + +hbase.unsafe.stream.capability.enforce +false + diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CommonFSUtils.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CommonFSUtils.java index 7ed2a78..d202460 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CommonFSUtils.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CommonFSUtils.java @@ -1,4 +1,4 @@ -/** +/* * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file @@ -352,7 +352,7 @@ public abstract class CommonFSUtils { public static FileSystem getWALFileSystem(final Configuration c) throws IOException { Path p = getWALRootDir(c); FileSystem fs = p.getFileSystem(c); -// hadoop-core does fs caching, so need to propogate this if set +// hadoop-core does fs caching, so need to propagate this if set String enforceStreamCapability = c.get(UNSAFE_STREAM_CAPABILITY_ENFORCE); if (enforceStreamCapability != null) { fs.getConf().set(UNSAFE_STREAM_CAPABILITY_ENFORCE, enforceStreamCapability); diff --git a/src/main/asciidoc/_chapters/getting_started.adoc b/src/main/asciidoc/_chapters/getting_started.adoc index 84ebcaa..e50ea6b 100644 --- a/src/main/asciidoc/_chapters/getting_started.adoc +++ b/src/main/asciidoc/_chapters/getting_started.adoc @@ -80,76 +80,12 @@ $ cd hbase-{Version}/ JAVA_HOME=/usr + - -. Edit _conf/hbase-site.xml_, which is the main HBase configuration file. - At this time, you need to specify the directory on the local filesystem where HBase and ZooKeeper write data and acknowledge some risks. - By default, a new directory is created under /tmp. - Many servers are configured to delete the contents of _/tmp_ upon reboot, so you should store the data elsewhere. - The following configuration will store HBase's data in the _hbase_ directory, in the home directory of the user called `testuser`. - Paste the `` tags beneath the `` tags, which should be empty in a new HBase install. -+ -.Example _hbase-site.xml_ for Standalone HBase - -[source,xml] - - - - -hbase.rootdir -file:///home/testuser/hbase - - -hbase.zookeeper.property.dataDir -/home/testuser/zookeeper - - -hbase.unsafe.stream.capability.enforce -false - - Controls whether HBase will check for stream capabilities (hflush/hsync). - - Disable this if you intend to run on LocalFileSystem, denoted by a rootdir - with the 'file://' scheme, but be mindful of the NOTE below. 
- - WARNING: Setting this to false blinds you to potential data loss and - inconsistent system state in the event of process and/or node failures. If - HBase is complaining of an inability to use hsync or hflush it's most - likely not a false positive. - - - - - -+ -You do not need to create the HBase data directory. -HBase will do this for you. If you create the directory, -HBase will attempt to do a migration, which is not what you want. -+ -NOTE: The _hbase.rootdir_ in the above example points to a directory -in the _local filesystem_. The 'file://' prefix is how we denote local -filesystem. You should take the WARNING present in the configuration example -to heart. In standalone mode HBase makes use
[hbase] branch branch-2.2 updated (1ea35aa -> d240146)
This is an automated email from the ASF dual-hosted git repository. ndimiduk pushed a change to branch branch-2.2 in repository https://gitbox.apache.org/repos/asf/hbase.git. from 1ea35aa HBASE-24190 : Make kerberos value of hbase.security.authentication property case insensitive (#1687) new 894e0c0 Revert "HBASE-24086 Disable output stream capability enforcement when running on LocalFileSystem" new d240146 HBASE-24271 Set values in `conf/hbase-site.xml` that enable running on `LocalFileSystem` out of the box The 2 revisions listed above as "new" are entirely new to this repository and will be described in separate emails. The revisions listed as "add" were already present in the repository and have only been added to this reference. Summary of changes: .gitignore | 1 + conf/hbase-site.xml| 34 +- .../apache/hadoop/hbase/util/CommonFSUtils.java| 10 +-- src/main/asciidoc/_chapters/getting_started.adoc | 77 ++ 4 files changed, 41 insertions(+), 81 deletions(-)
[hbase] branch branch-2.2 updated: HBASE-23702 hbase.hstore.flusher.count setting to 0 breaks HMaster (#1703)
This is an automated email from the ASF dual-hosted git repository. zghao pushed a commit to branch branch-2.2 in repository https://gitbox.apache.org/repos/asf/hbase.git The following commit(s) were added to refs/heads/branch-2.2 by this push: new a6152af HBASE-23702 hbase.hstore.flusher.count setting to 0 breaks HMaster (#1703) a6152af is described below commit a6152af6cf3473e0a318960e6d2e90fc07f30d70 Author: Guanghao Zhang AuthorDate: Fri May 15 09:05:25 2020 +0800 HBASE-23702 hbase.hstore.flusher.count setting to 0 breaks HMaster (#1703) Co-authored-by: Ctest Signed-off-by: Anoop Sam John Signed-off-by: Wellington Chevreuil --- .../java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java | 5 + 1 file changed, 5 insertions(+) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java index 522d283..2e9db58 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java @@ -128,6 +128,11 @@ class MemStoreFlusher implements FlushRequester { this.blockingWaitTime = conf.getInt("hbase.hstore.blockingWaitTime", 9); int handlerCount = conf.getInt("hbase.hstore.flusher.count", 2); +if (handlerCount < 1) { + LOG.warn("hbase.hstore.flusher.count was configed to {} which is less than 1, corrected to 1", + handlerCount); + handlerCount = 1; +} this.flushHandlers = new FlushHandler[handlerCount]; LOG.info("globalMemStoreLimit=" + TraditionalBinaryPrefix
[hbase] branch master updated: HBASE-23702 hbase.hstore.flusher.count setting to 0 breaks HMaster (#1703)
This is an automated email from the ASF dual-hosted git repository. zghao pushed a commit to branch master in repository https://gitbox.apache.org/repos/asf/hbase.git The following commit(s) were added to refs/heads/master by this push: new bab4b1c HBASE-23702 hbase.hstore.flusher.count setting to 0 breaks HMaster (#1703) bab4b1c is described below commit bab4b1cf8c57456794e15e556a7483b88c504ae2 Author: Guanghao Zhang AuthorDate: Fri May 15 09:05:25 2020 +0800 HBASE-23702 hbase.hstore.flusher.count setting to 0 breaks HMaster (#1703) Co-authored-by: Ctest Signed-off-by: Anoop Sam John Signed-off-by: Wellington Chevreuil --- .../java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java | 5 + 1 file changed, 5 insertions(+) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java index b94afab..9f4d881 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java @@ -129,6 +129,11 @@ class MemStoreFlusher implements FlushRequester { this.blockingWaitTime = conf.getInt("hbase.hstore.blockingWaitTime", 9); int handlerCount = conf.getInt("hbase.hstore.flusher.count", 2); +if (handlerCount < 1) { + LOG.warn("hbase.hstore.flusher.count was configed to {} which is less than 1, corrected to 1", + handlerCount); + handlerCount = 1; +} this.flushHandlers = new FlushHandler[handlerCount]; LOG.info("globalMemStoreLimit=" + TraditionalBinaryPrefix
[hbase] branch branch-2 updated: HBASE-23702 hbase.hstore.flusher.count setting to 0 breaks HMaster (#1703)
This is an automated email from the ASF dual-hosted git repository. zghao pushed a commit to branch branch-2 in repository https://gitbox.apache.org/repos/asf/hbase.git The following commit(s) were added to refs/heads/branch-2 by this push: new 692a597 HBASE-23702 hbase.hstore.flusher.count setting to 0 breaks HMaster (#1703) 692a597 is described below commit 692a597b638277ede0737b9f4ef8fe6a8998c3b7 Author: Guanghao Zhang AuthorDate: Fri May 15 09:05:25 2020 +0800 HBASE-23702 hbase.hstore.flusher.count setting to 0 breaks HMaster (#1703) Co-authored-by: Ctest Signed-off-by: Anoop Sam John Signed-off-by: Wellington Chevreuil --- .../java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java | 5 + 1 file changed, 5 insertions(+) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java index 39b0368..fcff217 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java @@ -128,6 +128,11 @@ class MemStoreFlusher implements FlushRequester { this.blockingWaitTime = conf.getInt("hbase.hstore.blockingWaitTime", 9); int handlerCount = conf.getInt("hbase.hstore.flusher.count", 2); +if (handlerCount < 1) { + LOG.warn("hbase.hstore.flusher.count was configed to {} which is less than 1, corrected to 1", + handlerCount); + handlerCount = 1; +} this.flushHandlers = new FlushHandler[handlerCount]; LOG.info("globalMemStoreLimit=" + TraditionalBinaryPrefix
[hbase] branch master updated: HBASE-24364 [Chaos Monkey] Invalid data block encoding in ChangeEncodingAction (#1707)
This is an automated email from the ASF dual-hosted git repository. meiyi pushed a commit to branch master in repository https://gitbox.apache.org/repos/asf/hbase.git The following commit(s) were added to refs/heads/master by this push: new a73132c HBASE-24364 [Chaos Monkey] Invalid data block encoding in ChangeEncodingAction (#1707) a73132c is described below commit a73132c62beb854c39d4c0cef317a99a95c5d758 Author: meiyi AuthorDate: Fri May 15 11:30:44 2020 +0800 HBASE-24364 [Chaos Monkey] Invalid data block encoding in ChangeEncodingAction (#1707) Signed-off-by: Jan Hentschel --- .../org/apache/hadoop/hbase/chaos/actions/ChangeEncodingAction.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/ChangeEncodingAction.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/ChangeEncodingAction.java index 0ff81d9..afa8a25 100644 --- a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/ChangeEncodingAction.java +++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/ChangeEncodingAction.java @@ -46,7 +46,7 @@ public class ChangeEncodingAction extends Action { public void perform() throws IOException { getLogger().debug("Performing action: Changing encodings on " + tableName); // possible DataBlockEncoding id's -final int[] possibleIds = {0, 2, 3, 4, 6}; +final int[] possibleIds = {0, 2, 3, 4, 7}; modifyAllTableColumns(tableName, (columnName, columnBuilder) -> { short id = (short) possibleIds[random.nextInt(possibleIds.length)];
[hbase] branch branch-2.3 updated: HBASE-23702 hbase.hstore.flusher.count setting to 0 breaks HMaster (#1703)
This is an automated email from the ASF dual-hosted git repository. zghao pushed a commit to branch branch-2.3 in repository https://gitbox.apache.org/repos/asf/hbase.git The following commit(s) were added to refs/heads/branch-2.3 by this push: new 20557c0 HBASE-23702 hbase.hstore.flusher.count setting to 0 breaks HMaster (#1703) 20557c0 is described below commit 20557c05105e95b79f3deb2ae4741c8f1be81200 Author: Guanghao Zhang AuthorDate: Fri May 15 09:05:25 2020 +0800 HBASE-23702 hbase.hstore.flusher.count setting to 0 breaks HMaster (#1703) Co-authored-by: Ctest Signed-off-by: Anoop Sam John Signed-off-by: Wellington Chevreuil --- .../java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java | 5 + 1 file changed, 5 insertions(+) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java index 39b0368..fcff217 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java @@ -128,6 +128,11 @@ class MemStoreFlusher implements FlushRequester { this.blockingWaitTime = conf.getInt("hbase.hstore.blockingWaitTime", 9); int handlerCount = conf.getInt("hbase.hstore.flusher.count", 2); +if (handlerCount < 1) { + LOG.warn("hbase.hstore.flusher.count was configed to {} which is less than 1, corrected to 1", + handlerCount); + handlerCount = 1; +} this.flushHandlers = new FlushHandler[handlerCount]; LOG.info("globalMemStoreLimit=" + TraditionalBinaryPrefix
[hbase] branch branch-2.2 updated: HBASE-24364 [Chaos Monkey] Invalid data block encoding in ChangeEncodingAction (#1707)
This is an automated email from the ASF dual-hosted git repository. meiyi pushed a commit to branch branch-2.2 in repository https://gitbox.apache.org/repos/asf/hbase.git The following commit(s) were added to refs/heads/branch-2.2 by this push: new 43d39ee HBASE-24364 [Chaos Monkey] Invalid data block encoding in ChangeEncodingAction (#1707) 43d39ee is described below commit 43d39ee26549e4be52c66a9dd6f26867696708ef Author: meiyi AuthorDate: Fri May 15 11:30:44 2020 +0800 HBASE-24364 [Chaos Monkey] Invalid data block encoding in ChangeEncodingAction (#1707) Signed-off-by: Jan Hentschel --- .../org/apache/hadoop/hbase/chaos/actions/ChangeEncodingAction.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/ChangeEncodingAction.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/ChangeEncodingAction.java index 55a308c..4977426 100644 --- a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/ChangeEncodingAction.java +++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/ChangeEncodingAction.java @@ -43,7 +43,7 @@ public class ChangeEncodingAction extends Action { public void perform() throws IOException { LOG.debug("Performing action: Changing encodings on " + tableName); // possible DataBlockEncoding id's -final int[] possibleIds = {0, 2, 3, 4, 6}; +final int[] possibleIds = {0, 2, 3, 4, 7}; modifyAllTableColumns(tableName, (columnName, columnBuilder) -> { short id = (short) possibleIds[random.nextInt(possibleIds.length)];
[hbase] branch branch-2.3 updated: HBASE-24364 [Chaos Monkey] Invalid data block encoding in ChangeEncodingAction (#1707)
This is an automated email from the ASF dual-hosted git repository. meiyi pushed a commit to branch branch-2.3 in repository https://gitbox.apache.org/repos/asf/hbase.git The following commit(s) were added to refs/heads/branch-2.3 by this push: new 9b64ab0 HBASE-24364 [Chaos Monkey] Invalid data block encoding in ChangeEncodingAction (#1707) 9b64ab0 is described below commit 9b64ab029cafc81a08b073029a0663aa00702962 Author: meiyi AuthorDate: Fri May 15 11:30:44 2020 +0800 HBASE-24364 [Chaos Monkey] Invalid data block encoding in ChangeEncodingAction (#1707) Signed-off-by: Jan Hentschel --- .../org/apache/hadoop/hbase/chaos/actions/ChangeEncodingAction.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/ChangeEncodingAction.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/ChangeEncodingAction.java index 0ff81d9..afa8a25 100644 --- a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/ChangeEncodingAction.java +++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/ChangeEncodingAction.java @@ -46,7 +46,7 @@ public class ChangeEncodingAction extends Action { public void perform() throws IOException { getLogger().debug("Performing action: Changing encodings on " + tableName); // possible DataBlockEncoding id's -final int[] possibleIds = {0, 2, 3, 4, 6}; +final int[] possibleIds = {0, 2, 3, 4, 7}; modifyAllTableColumns(tableName, (columnName, columnBuilder) -> { short id = (short) possibleIds[random.nextInt(possibleIds.length)];
[hbase] branch master updated: HBASE-24368 Let HBCKSCP clear 'Unknown Servers', even if RegionStateNode has RegionLocation == null
This is an automated email from the ASF dual-hosted git repository. stack pushed a commit to branch master in repository https://gitbox.apache.org/repos/asf/hbase.git The following commit(s) were added to refs/heads/master by this push: new 32e2682 HBASE-24368 Let HBCKSCP clear 'Unknown Servers', even if RegionStateNode has RegionLocation == null 32e2682 is described below commit 32e2682310e725526ca3417757968d452bb5775b Author: stack AuthorDate: Wed May 13 22:19:25 2020 -0700 HBASE-24368 Let HBCKSCP clear 'Unknown Servers', even if RegionStateNode has RegionLocation == null hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/GCMultipleMergedRegionsProcedure.java Edit a log. hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/HBCKServerCrashProcedure.java Add override of isMatchingRegionLocation. Allow 'null' as a pass in HBCKSCP. hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerCrashProcedure.java Add a method for HBCKSCP to override and be less strict filtering assigns. hbase-server/src/main/resources/hbase-webapps/master/hbck.jsp Some doc on what 'Unknown Servers' are. 
--- .../GCMultipleMergedRegionsProcedure.java | 5 ++--- .../master/procedure/HBCKServerCrashProcedure.java | 13 +++ .../master/procedure/ServerCrashProcedure.java | 26 -- .../main/resources/hbase-webapps/master/hbck.jsp | 23 +++ 4 files changed, 53 insertions(+), 14 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/GCMultipleMergedRegionsProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/GCMultipleMergedRegionsProcedure.java index 4fc5484..71fcd35 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/GCMultipleMergedRegionsProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/GCMultipleMergedRegionsProcedure.java @@ -99,12 +99,11 @@ public class GCMultipleMergedRegionsProcedure extends case GC_MERGED_REGIONS_PREPARE: // If GCMultipleMergedRegionsProcedure processing is slower than the CatalogJanitor's scan // interval, it will end resubmitting GCMultipleMergedRegionsProcedure for the same - // region, we can skip duplicate GCMultipleMergedRegionsProcedure while previous finished + // region. 
We can skip duplicate GCMultipleMergedRegionsProcedure while previous finished List parents = MetaTableAccessor.getMergeRegions( env.getMasterServices().getConnection(), mergedChild.getRegionName()); if (parents == null || parents.isEmpty()) { -LOG.info("Region=" + mergedChild.getShortNameToLog() -+ " info:merge qualifier has been deleted"); +LOG.info("{} mergeXXX qualifiers have ALL been deleted", mergedChild.getShortNameToLog()); return Flow.NO_MORE_STATE; } setNextState(GCMergedRegionsState.GC_MERGED_REGIONS_PURGE); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/HBCKServerCrashProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/HBCKServerCrashProcedure.java index eec820c..a12b853 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/HBCKServerCrashProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/HBCKServerCrashProcedure.java @@ -30,6 +30,7 @@ import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.master.RegionState; +import org.apache.hadoop.hbase.master.assignment.RegionStateNode; import org.apache.hadoop.hbase.master.assignment.RegionStateStore; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; @@ -168,4 +169,16 @@ public class HBCKServerCrashProcedure extends ServerCrashProcedure { return this.reassigns; } } + + /** + * The RegionStateNode will not have a location if a confirm of an OPEN fails. On fail, + * the RegionStateNode regionLocation is set to null. This is 'looser' than the test done + * in the superclass. The HBCKSCP has been scheduled by an operator via hbck2 probably at the + * behest of a report of an 'Unknown Server' in the 'HBCK Report'. Let the operators operation + * succeed even in case where the region location in the RegionStateNode is null. 
+ */ + @Override + protected boolean isMatchingRegionLocation(RegionStateNode rsn) { +return super.isMatchingRegionLocation(rsn) || rsn.getRegionLocation() == null; + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerCrashProcedure.java
[hbase] 02/02: HBASE-24271 Set values in `conf/hbase-site.xml` that enable running on `LocalFileSystem` out of the box
This is an automated email from the ASF dual-hosted git repository. ndimiduk pushed a commit to branch branch-1 in repository https://gitbox.apache.org/repos/asf/hbase.git commit a53356729fce74ea71d945747131baf8ae5a Author: Nick Dimiduk AuthorDate: Mon Apr 27 13:07:29 2020 -0700 HBASE-24271 Set values in `conf/hbase-site.xml` that enable running on `LocalFileSystem` out of the box Simplify the new user experience shipping a configuration that enables a fresh checkout or tarball distribution to run in standalone mode without direct user configuration. This change restores the behavior we had when running on Hadoop 2.8 and earlier. Patch for master includes an update to the book. This change will be omitted when backporting to earlier branches. Signed-off-by: stack Signed-off-by: Josh Elser Signed-off-by: Duo Zhang --- .gitignore | 1 + conf/hbase-site.xml| 34 -- .../apache/hadoop/hbase/util/CommonFSUtils.java| 4 +-- 3 files changed, 35 insertions(+), 4 deletions(-) diff --git a/.gitignore b/.gitignore index 0ae87d9..0b883e0 100644 --- a/.gitignore +++ b/.gitignore @@ -21,3 +21,4 @@ linklint/ .checkstyle **/.checkstyle .java-version +tmp diff --git a/conf/hbase-site.xml b/conf/hbase-site.xml index c516ac7..20c62f7 100644 --- a/conf/hbase-site.xml +++ b/conf/hbase-site.xml @@ -1,8 +1,7 @@ + + +hbase.cluster.distributed +false + + +hbase.tmp.dir +./tmp + + +hbase.unsafe.stream.capability.enforce +false + diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CommonFSUtils.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CommonFSUtils.java index ef6d489..1225d19 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CommonFSUtils.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CommonFSUtils.java @@ -1,4 +1,4 @@ -/** +/* * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file @@ -346,7 +346,7 @@ public abstract class CommonFSUtils { public static FileSystem getWALFileSystem(final Configuration c) throws IOException { Path p = getWALRootDir(c); FileSystem fs = p.getFileSystem(c); -// hadoop-core does fs caching, so need to propogate this if set +// hadoop-core does fs caching, so need to propagate this if set String enforceStreamCapability = c.get(UNSAFE_STREAM_CAPABILITY_ENFORCE); if (enforceStreamCapability != null) { fs.getConf().set(UNSAFE_STREAM_CAPABILITY_ENFORCE, enforceStreamCapability);
[hbase] branch branch-1 updated (59a48fc -> a533567)
This is an automated email from the ASF dual-hosted git repository. ndimiduk pushed a change to branch branch-1 in repository https://gitbox.apache.org/repos/asf/hbase.git. from 59a48fc HBASE-24355 Fix typos in the HStore#compact annotation (#1699) new 67abffa Revert "HBASE-24086 Disable output stream capability enforcement when running on LocalFileSystem" new a533567 HBASE-24271 Set values in `conf/hbase-site.xml` that enable running on `LocalFileSystem` out of the box The 2 revisions listed above as "new" are entirely new to this repository and will be described in separate emails. The revisions listed as "add" were already present in the repository and have only been added to this reference. Summary of changes: .gitignore | 1 + conf/hbase-site.xml| 34 -- .../apache/hadoop/hbase/util/CommonFSUtils.java| 10 --- 3 files changed, 33 insertions(+), 12 deletions(-)
[hbase] 01/02: Revert "HBASE-24086 Disable output stream capability enforcement when running on LocalFileSystem"
This is an automated email from the ASF dual-hosted git repository. ndimiduk pushed a commit to branch branch-1 in repository https://gitbox.apache.org/repos/asf/hbase.git commit 67abffa10554b07ab3093aca93f44576c4cb21fe Author: Nick Dimiduk AuthorDate: Thu May 14 14:24:42 2020 -0700 Revert "HBASE-24086 Disable output stream capability enforcement when running on LocalFileSystem" This reverts commit 03d73f151b26dd35cd9d0d36bc22a9bc9a077188. --- .../java/org/apache/hadoop/hbase/util/CommonFSUtils.java | 14 ++ 1 file changed, 2 insertions(+), 12 deletions(-) diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CommonFSUtils.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CommonFSUtils.java index 77615db..ef6d489 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CommonFSUtils.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CommonFSUtils.java @@ -1,4 +1,4 @@ -/* +/** * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file @@ -39,7 +39,6 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.LocalFileSystem; import org.apache.hadoop.fs.LocatedFileStatus; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.PathFilter; @@ -347,20 +346,11 @@ public abstract class CommonFSUtils { public static FileSystem getWALFileSystem(final Configuration c) throws IOException { Path p = getWALRootDir(c); FileSystem fs = p.getFileSystem(c); -// hadoop-core does fs caching, so need to propagate this if set +// hadoop-core does fs caching, so need to propogate this if set String enforceStreamCapability = c.get(UNSAFE_STREAM_CAPABILITY_ENFORCE); if (enforceStreamCapability != null) { fs.getConf().set(UNSAFE_STREAM_CAPABILITY_ENFORCE, enforceStreamCapability); } -if (fs instanceof LocalFileSystem) { - // running on LocalFileSystem, which does not support the required capabilities `HSYNC` - // and `HFLUSH`. disable enforcement. - final boolean value = false; - LOG.warn("Cannot enforce durability guarantees while running on " + fs.getUri() -+ ". Setting " + UNSAFE_STREAM_CAPABILITY_ENFORCE + "=" + value -+ " for this FileSystem."); - fs.getConf().setBoolean(UNSAFE_STREAM_CAPABILITY_ENFORCE, value); -} return fs; }
[hbase] branch master updated: Amend HBASE-24350: Extending and Fixing HBaseTable level replication metrics (#1704)
This is an automated email from the ASF dual-hosted git repository. apurtell pushed a commit to branch master in repository https://gitbox.apache.org/repos/asf/hbase.git The following commit(s) were added to refs/heads/master by this push: new 94f36fd Amend HBASE-24350: Extending and Fixing HBaseTable level replication metrics (#1704) 94f36fd is described below commit 94f36fdb8ecef55448eab590dee9670ab8980bd1 Author: Andrew Purtell AuthorDate: Thu May 14 13:41:56 2020 -0700 Amend HBASE-24350: Extending and Fixing HBaseTable level replication metrics (#1704) - Rename WALEntryBatch#getWaEntriesWithSize -> getWalEntriesWithSize --- .../hadoop/hbase/replication/regionserver/ReplicationSourceShipper.java | 2 +- .../org/apache/hadoop/hbase/replication/regionserver/WALEntryBatch.java | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceShipper.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceShipper.java index ad76c5c..e50005a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceShipper.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceShipper.java @@ -221,7 +221,7 @@ public class ReplicationSourceShipper extends Thread { entryBatch.getNbHFiles()); source.getSourceMetrics().setAgeOfLastShippedOp( entries.get(entries.size() - 1).getKey().getWriteTime(), walGroupId); - source.getSourceMetrics().updateTableLevelMetrics(entryBatch.getWaEntriesWithSize()); + source.getSourceMetrics().updateTableLevelMetrics(entryBatch.getWalEntriesWithSize()); if (LOG.isTraceEnabled()) { LOG.debug("Replicated {} entries or {} operations in {} ms", diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/WALEntryBatch.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/WALEntryBatch.java index bc600d0..4f96c96 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/WALEntryBatch.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/WALEntryBatch.java @@ -83,7 +83,7 @@ class WALEntryBatch { /** * @return the WAL Entries. */ - public List> getWaEntriesWithSize() { + public List> getWalEntriesWithSize() { return walEntriesWithSize; }
[hbase] 01/02: HBASE-24350: Extending and Fixing HBaseTable level replication metrics (#1704)
This is an automated email from the ASF dual-hosted git repository. apurtell pushed a commit to branch branch-2 in repository https://gitbox.apache.org/repos/asf/hbase.git commit 1ff532678dca7faf1c7404c6fe0e118ef0cd4872 Author: Sandeep Pal <50725353+sandeepvina...@users.noreply.github.com> AuthorDate: Thu May 14 10:34:51 2020 -0700 HBASE-24350: Extending and Fixing HBaseTable level replication metrics (#1704) Signed-off-by: Wellington Chevreuil Signed-off-by: Andrew Purtell --- .../MetricsReplicationSourceFactory.java | 1 + ...ory.java => MetricsReplicationTableSource.java} | 12 +- .../MetricsReplicationSourceFactoryImpl.java | 4 + .../MetricsReplicationTableSourceImpl.java | 134 + .../replication/regionserver/MetricsSource.java| 36 +- .../regionserver/ReplicationSource.java| 2 +- .../regionserver/ReplicationSourceShipper.java | 8 +- .../regionserver/ReplicationSourceWALReader.java | 6 +- .../replication/regionserver/WALEntryBatch.java| 24 ++-- .../hbase/replication/TestReplicationEndpoint.java | 52 ++-- 10 files changed, 242 insertions(+), 37 deletions(-) diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSourceFactory.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSourceFactory.java index 6534b11..2816f83 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSourceFactory.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSourceFactory.java @@ -24,5 +24,6 @@ import org.apache.yetus.audience.InterfaceAudience; public interface MetricsReplicationSourceFactory { public MetricsReplicationSinkSource getSink(); public MetricsReplicationSourceSource getSource(String id); + public MetricsReplicationTableSource getTableSource(String tableName); public MetricsReplicationSourceSource getGlobalSource(); } diff --git 
a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSourceFactory.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationTableSource.java similarity index 78% copy from hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSourceFactory.java copy to hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationTableSource.java index 6534b11..faa944a 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSourceFactory.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationTableSource.java @@ -18,11 +18,15 @@ package org.apache.hadoop.hbase.replication.regionserver; +import org.apache.hadoop.hbase.metrics.BaseSource; import org.apache.yetus.audience.InterfaceAudience; @InterfaceAudience.Private -public interface MetricsReplicationSourceFactory { - public MetricsReplicationSinkSource getSink(); - public MetricsReplicationSourceSource getSource(String id); - public MetricsReplicationSourceSource getGlobalSource(); +public interface MetricsReplicationTableSource extends BaseSource { + + void setLastShippedAge(long age); + void incrShippedBytes(long size); + long getShippedBytes(); + void clear(); + long getLastShippedAge(); } diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSourceFactoryImpl.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSourceFactoryImpl.java index af310f0..a3b3462 100644 --- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSourceFactoryImpl.java +++ 
b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSourceFactoryImpl.java @@ -35,6 +35,10 @@ public class MetricsReplicationSourceFactoryImpl implements MetricsReplicationSo return new MetricsReplicationSourceSourceImpl(SourceHolder.INSTANCE.source, id); } + @Override public MetricsReplicationTableSource getTableSource(String tableName) { +return new MetricsReplicationTableSourceImpl(SourceHolder.INSTANCE.source, tableName); + } + @Override public MetricsReplicationSourceSource getGlobalSource() { return new MetricsReplicationGlobalSourceSource(SourceHolder.INSTANCE.source); } diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationTableSourceImpl.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationTableSourceImpl.java new file mode 100644 index
[hbase] branch branch-2 updated (2e4be3e -> a93d94c)
This is an automated email from the ASF dual-hosted git repository. apurtell pushed a change to branch branch-2 in repository https://gitbox.apache.org/repos/asf/hbase.git. from 2e4be3e HBASE-24368 Let HBCKSCP clear 'Unknown Servers', even if RegionStateNode has RegionLocation == null new 1ff5326 HBASE-24350: Extending and Fixing HBaseTable level replication metrics (#1704) new a93d94c Amend HBASE-24350: Extending and Fixing HBaseTable level replication metrics (#1704) The 2 revisions listed above as "new" are entirely new to this repository and will be described in separate emails. The revisions listed as "add" were already present in the repository and have only been added to this reference. Summary of changes: .../MetricsReplicationSourceFactory.java | 1 + ...ory.java => MetricsReplicationTableSource.java} | 12 +- .../MetricsReplicationSourceFactoryImpl.java | 4 + .../MetricsReplicationTableSourceImpl.java | 134 + .../replication/regionserver/MetricsSource.java| 36 +- .../regionserver/ReplicationSource.java| 2 +- .../regionserver/ReplicationSourceShipper.java | 8 +- .../regionserver/ReplicationSourceWALReader.java | 6 +- .../replication/regionserver/WALEntryBatch.java| 24 ++-- .../hbase/replication/TestReplicationEndpoint.java | 52 ++-- 10 files changed, 242 insertions(+), 37 deletions(-) copy hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/{MetricsReplicationSourceFactory.java => MetricsReplicationTableSource.java} (78%) create mode 100644 hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationTableSourceImpl.java
[hbase] 02/02: Amend HBASE-24350: Extending and Fixing HBaseTable level replication metrics (#1704)
This is an automated email from the ASF dual-hosted git repository. apurtell pushed a commit to branch branch-2 in repository https://gitbox.apache.org/repos/asf/hbase.git commit a93d94cad312d7c92b3a2eb43848058605d27dcb Author: Andrew Purtell AuthorDate: Thu May 14 13:41:56 2020 -0700 Amend HBASE-24350: Extending and Fixing HBaseTable level replication metrics (#1704) - Rename WALEntryBatch#getWaEntriesWithSize -> getWalEntriesWithSize --- .../hadoop/hbase/replication/regionserver/ReplicationSourceShipper.java | 2 +- .../org/apache/hadoop/hbase/replication/regionserver/WALEntryBatch.java | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceShipper.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceShipper.java index 65430b3..d7e7e01 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceShipper.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceShipper.java @@ -211,7 +211,7 @@ public class ReplicationSourceShipper extends Thread { entryBatch.getNbHFiles()); source.getSourceMetrics().setAgeOfLastShippedOp( entries.get(entries.size() - 1).getKey().getWriteTime(), walGroupId); - source.getSourceMetrics().updateTableLevelMetrics(entryBatch.getWaEntriesWithSize()); + source.getSourceMetrics().updateTableLevelMetrics(entryBatch.getWalEntriesWithSize()); if (LOG.isTraceEnabled()) { LOG.debug("Replicated {} entries or {} operations in {} ms", diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/WALEntryBatch.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/WALEntryBatch.java index bc600d0..4f96c96 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/WALEntryBatch.java +++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/WALEntryBatch.java @@ -83,7 +83,7 @@ class WALEntryBatch { /** * @return the WAL Entries. */ - public List> getWaEntriesWithSize() { + public List> getWalEntriesWithSize() { return walEntriesWithSize; }
[hbase] 02/02: HBASE-24271 Set values in `conf/hbase-site.xml` that enable running on `LocalFileSystem` out of the box
This is an automated email from the ASF dual-hosted git repository. ndimiduk pushed a commit to branch branch-2 in repository https://gitbox.apache.org/repos/asf/hbase.git commit 1c32a8ba2e9e87873194a20f5de85c6031e74f47 Author: Nick Dimiduk AuthorDate: Mon Apr 27 13:07:29 2020 -0700 HBASE-24271 Set values in `conf/hbase-site.xml` that enable running on `LocalFileSystem` out of the box Simplify the new user experience shipping a configuration that enables a fresh checkout or tarball distribution to run in standalone mode without direct user configuration. This change restores the behavior we had when running on Hadoop 2.8 and earlier. Patch for master includes an update to the book. This change will be omitted when backporting to earlier branches. Signed-off-by: stack Signed-off-by: Josh Elser Signed-off-by: Duo Zhang --- .gitignore | 1 + conf/hbase-site.xml| 34 +- .../apache/hadoop/hbase/util/CommonFSUtils.java| 4 +- src/main/asciidoc/_chapters/getting_started.adoc | 77 ++ 4 files changed, 42 insertions(+), 74 deletions(-) diff --git a/.gitignore b/.gitignore index 0ae87d9..0b883e0 100644 --- a/.gitignore +++ b/.gitignore @@ -21,3 +21,4 @@ linklint/ .checkstyle **/.checkstyle .java-version +tmp diff --git a/conf/hbase-site.xml b/conf/hbase-site.xml index c516ac7..20c62f7 100644 --- a/conf/hbase-site.xml +++ b/conf/hbase-site.xml @@ -1,8 +1,7 @@ + + +hbase.cluster.distributed +false + + +hbase.tmp.dir +./tmp + + +hbase.unsafe.stream.capability.enforce +false + diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CommonFSUtils.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CommonFSUtils.java index a8c24c5..d46352d 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CommonFSUtils.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CommonFSUtils.java @@ -1,4 +1,4 @@ -/** +/* * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file @@ -350,7 +350,7 @@ public final class CommonFSUtils { public static FileSystem getWALFileSystem(final Configuration c) throws IOException { Path p = getWALRootDir(c); FileSystem fs = p.getFileSystem(c); -// hadoop-core does fs caching, so need to propogate this if set +// hadoop-core does fs caching, so need to propagate this if set String enforceStreamCapability = c.get(UNSAFE_STREAM_CAPABILITY_ENFORCE); if (enforceStreamCapability != null) { fs.getConf().set(UNSAFE_STREAM_CAPABILITY_ENFORCE, enforceStreamCapability); diff --git a/src/main/asciidoc/_chapters/getting_started.adoc b/src/main/asciidoc/_chapters/getting_started.adoc index 84ebcaa..e50ea6b 100644 --- a/src/main/asciidoc/_chapters/getting_started.adoc +++ b/src/main/asciidoc/_chapters/getting_started.adoc @@ -80,76 +80,12 @@ $ cd hbase-{Version}/ JAVA_HOME=/usr + - -. Edit _conf/hbase-site.xml_, which is the main HBase configuration file. - At this time, you need to specify the directory on the local filesystem where HBase and ZooKeeper write data and acknowledge some risks. - By default, a new directory is created under /tmp. - Many servers are configured to delete the contents of _/tmp_ upon reboot, so you should store the data elsewhere. - The following configuration will store HBase's data in the _hbase_ directory, in the home directory of the user called `testuser`. - Paste the `` tags beneath the `` tags, which should be empty in a new HBase install. -+ -.Example _hbase-site.xml_ for Standalone HBase - -[source,xml] - - - - -hbase.rootdir -file:///home/testuser/hbase - - -hbase.zookeeper.property.dataDir -/home/testuser/zookeeper - - -hbase.unsafe.stream.capability.enforce -false - - Controls whether HBase will check for stream capabilities (hflush/hsync). - - Disable this if you intend to run on LocalFileSystem, denoted by a rootdir - with the 'file://' scheme, but be mindful of the NOTE below. 
- - WARNING: Setting this to false blinds you to potential data loss and - inconsistent system state in the event of process and/or node failures. If - HBase is complaining of an inability to use hsync or hflush it's most - likely not a false positive. - - - - - -+ -You do not need to create the HBase data directory. -HBase will do this for you. If you create the directory, -HBase will attempt to do a migration, which is not what you want. -+ -NOTE: The _hbase.rootdir_ in the above example points to a directory -in the _local filesystem_. The 'file://' prefix is how we denote local -filesystem. You should take the WARNING present in the configuration example -to heart. In standalone mode HBase makes use of
[hbase] branch branch-2 updated (a93d94c -> 1c32a8b)
This is an automated email from the ASF dual-hosted git repository. ndimiduk pushed a change to branch branch-2 in repository https://gitbox.apache.org/repos/asf/hbase.git. from a93d94c Amend HBASE-24350: Extending and Fixing HBaseTable level replication metrics (#1704) new 2ab394d Revert "HBASE-24086 Disable output stream capability enforcement when running on LocalFileSystem" new 1c32a8b HBASE-24271 Set values in `conf/hbase-site.xml` that enable running on `LocalFileSystem` out of the box The 2 revisions listed above as "new" are entirely new to this repository and will be described in separate emails. The revisions listed as "add" were already present in the repository and have only been added to this reference. Summary of changes: .gitignore | 1 + conf/hbase-site.xml| 34 +- .../apache/hadoop/hbase/util/CommonFSUtils.java| 11 +--- src/main/asciidoc/_chapters/getting_started.adoc | 77 ++ 4 files changed, 42 insertions(+), 81 deletions(-)
[hbase] 01/02: Revert "HBASE-24086 Disable output stream capability enforcement when running on LocalFileSystem"
This is an automated email from the ASF dual-hosted git repository. ndimiduk pushed a commit to branch branch-2 in repository https://gitbox.apache.org/repos/asf/hbase.git commit 2ab394ddb16040d0bc97f6b873be398e9c473bdf Author: Nick Dimiduk AuthorDate: Thu May 14 13:31:41 2020 -0700 Revert "HBASE-24086 Disable output stream capability enforcement when running on LocalFileSystem" This reverts commit 4e31ada43b2aa80634e3417bb773c604a0928ab0. --- .../java/org/apache/hadoop/hbase/util/CommonFSUtils.java | 15 --- 1 file changed, 4 insertions(+), 11 deletions(-) diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CommonFSUtils.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CommonFSUtils.java index 2e21605..a8c24c5 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CommonFSUtils.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CommonFSUtils.java @@ -1,4 +1,4 @@ -/* +/** * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file @@ -28,12 +28,12 @@ import java.util.List; import java.util.Locale; import java.util.Map; import java.util.concurrent.ConcurrentHashMap; +import org.apache.hadoop.HadoopIllegalArgumentException; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FSDataOutputStreamBuilder; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.LocalFileSystem; import org.apache.hadoop.fs.LocatedFileStatus; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.PathFilter; @@ -41,6 +41,7 @@ import org.apache.hadoop.fs.RemoteIterator; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.ipc.RemoteException; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -349,19 +350,11 @@ public final class CommonFSUtils { public static FileSystem getWALFileSystem(final Configuration c) throws IOException { Path p = getWALRootDir(c); FileSystem fs = p.getFileSystem(c); -// hadoop-core does fs caching, so need to propagate this if set +// hadoop-core does fs caching, so need to propogate this if set String enforceStreamCapability = c.get(UNSAFE_STREAM_CAPABILITY_ENFORCE); if (enforceStreamCapability != null) { fs.getConf().set(UNSAFE_STREAM_CAPABILITY_ENFORCE, enforceStreamCapability); } -if (fs instanceof LocalFileSystem) { - // running on LocalFileSystem, which does not support the required capabilities `HSYNC` - // and `HFLUSH`. disable enforcement. - final boolean value = false; - LOG.warn("Cannot enforce durability guarantees while running on {}. Setting {}={} for" -+ " this FileSystem.", fs.getUri(), UNSAFE_STREAM_CAPABILITY_ENFORCE, value); - fs.getConf().setBoolean(UNSAFE_STREAM_CAPABILITY_ENFORCE, value); -} return fs; }
[hbase] branch branch-2.3 updated: HBASE-24368 Let HBCKSCP clear 'Unknown Servers', even if RegionStateNode has RegionLocation == null
This is an automated email from the ASF dual-hosted git repository. stack pushed a commit to branch branch-2.3 in repository https://gitbox.apache.org/repos/asf/hbase.git The following commit(s) were added to refs/heads/branch-2.3 by this push: new 595b861 HBASE-24368 Let HBCKSCP clear 'Unknown Servers', even if RegionStateNode has RegionLocation == null 595b861 is described below commit 595b8619a68ff88e920acd2101795003887bf92f Author: stack AuthorDate: Wed May 13 22:19:25 2020 -0700 HBASE-24368 Let HBCKSCP clear 'Unknown Servers', even if RegionStateNode has RegionLocation == null hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/GCMultipleMergedRegionsProcedure.java Edit a log. hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/HBCKServerCrashProcedure.java Add override of isMatchingRegionLocation. Allow 'null' as a pass in HBCKSCP. hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerCrashProcedure.java Add a method for HBCKSCP to override and be less strict filtering assigns. hbase-server/src/main/resources/hbase-webapps/master/hbck.jsp Some doc on what 'Unknown Servers' are. 
--- .../GCMultipleMergedRegionsProcedure.java | 5 ++--- .../master/procedure/HBCKServerCrashProcedure.java | 13 +++ .../master/procedure/ServerCrashProcedure.java | 26 -- .../main/resources/hbase-webapps/master/hbck.jsp | 23 +++ 4 files changed, 53 insertions(+), 14 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/GCMultipleMergedRegionsProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/GCMultipleMergedRegionsProcedure.java index 4fc5484..71fcd35 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/GCMultipleMergedRegionsProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/GCMultipleMergedRegionsProcedure.java @@ -99,12 +99,11 @@ public class GCMultipleMergedRegionsProcedure extends case GC_MERGED_REGIONS_PREPARE: // If GCMultipleMergedRegionsProcedure processing is slower than the CatalogJanitor's scan // interval, it will end resubmitting GCMultipleMergedRegionsProcedure for the same - // region, we can skip duplicate GCMultipleMergedRegionsProcedure while previous finished + // region. 
We can skip duplicate GCMultipleMergedRegionsProcedure while previous finished List parents = MetaTableAccessor.getMergeRegions( env.getMasterServices().getConnection(), mergedChild.getRegionName()); if (parents == null || parents.isEmpty()) { -LOG.info("Region=" + mergedChild.getShortNameToLog() -+ " info:merge qualifier has been deleted"); +LOG.info("{} mergeXXX qualifiers have ALL been deleted", mergedChild.getShortNameToLog()); return Flow.NO_MORE_STATE; } setNextState(GCMergedRegionsState.GC_MERGED_REGIONS_PURGE); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/HBCKServerCrashProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/HBCKServerCrashProcedure.java index eec820c..a12b853 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/HBCKServerCrashProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/HBCKServerCrashProcedure.java @@ -30,6 +30,7 @@ import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.master.RegionState; +import org.apache.hadoop.hbase.master.assignment.RegionStateNode; import org.apache.hadoop.hbase.master.assignment.RegionStateStore; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; @@ -168,4 +169,16 @@ public class HBCKServerCrashProcedure extends ServerCrashProcedure { return this.reassigns; } } + + /** + * The RegionStateNode will not have a location if a confirm of an OPEN fails. On fail, + * the RegionStateNode regionLocation is set to null. This is 'looser' than the test done + * in the superclass. The HBCKSCP has been scheduled by an operator via hbck2 probably at the + * behest of a report of an 'Unknown Server' in the 'HBCK Report'. Let the operators operation + * succeed even in case where the region location in the RegionStateNode is null. 
+ */ + @Override + protected boolean isMatchingRegionLocation(RegionStateNode rsn) { +return super.isMatchingRegionLocation(rsn) || rsn.getRegionLocation() == null; + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerCrashProcedure.java
[hbase] branch branch-2 updated: HBASE-24368 Let HBCKSCP clear 'Unknown Servers', even if RegionStateNode has RegionLocation == null
This is an automated email from the ASF dual-hosted git repository. stack pushed a commit to branch branch-2 in repository https://gitbox.apache.org/repos/asf/hbase.git The following commit(s) were added to refs/heads/branch-2 by this push: new 2e4be3e HBASE-24368 Let HBCKSCP clear 'Unknown Servers', even if RegionStateNode has RegionLocation == null 2e4be3e is described below commit 2e4be3e77db6dc53626b3d99c26699273b91399d Author: stack AuthorDate: Wed May 13 22:19:25 2020 -0700 HBASE-24368 Let HBCKSCP clear 'Unknown Servers', even if RegionStateNode has RegionLocation == null hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/GCMultipleMergedRegionsProcedure.java Edit a log. hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/HBCKServerCrashProcedure.java Add override of isMatchingRegionLocation. Allow 'null' as a pass in HBCKSCP. hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerCrashProcedure.java Add a method for HBCKSCP to override and be less strict filtering assigns. hbase-server/src/main/resources/hbase-webapps/master/hbck.jsp Some doc on what 'Unknown Servers' are. 
--- .../GCMultipleMergedRegionsProcedure.java | 5 ++--- .../master/procedure/HBCKServerCrashProcedure.java | 13 +++ .../master/procedure/ServerCrashProcedure.java | 26 -- .../main/resources/hbase-webapps/master/hbck.jsp | 23 +++ 4 files changed, 53 insertions(+), 14 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/GCMultipleMergedRegionsProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/GCMultipleMergedRegionsProcedure.java index 4fc5484..71fcd35 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/GCMultipleMergedRegionsProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/GCMultipleMergedRegionsProcedure.java @@ -99,12 +99,11 @@ public class GCMultipleMergedRegionsProcedure extends case GC_MERGED_REGIONS_PREPARE: // If GCMultipleMergedRegionsProcedure processing is slower than the CatalogJanitor's scan // interval, it will end resubmitting GCMultipleMergedRegionsProcedure for the same - // region, we can skip duplicate GCMultipleMergedRegionsProcedure while previous finished + // region. 
We can skip duplicate GCMultipleMergedRegionsProcedure while previous finished List parents = MetaTableAccessor.getMergeRegions( env.getMasterServices().getConnection(), mergedChild.getRegionName()); if (parents == null || parents.isEmpty()) { -LOG.info("Region=" + mergedChild.getShortNameToLog() -+ " info:merge qualifier has been deleted"); +LOG.info("{} mergeXXX qualifiers have ALL been deleted", mergedChild.getShortNameToLog()); return Flow.NO_MORE_STATE; } setNextState(GCMergedRegionsState.GC_MERGED_REGIONS_PURGE); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/HBCKServerCrashProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/HBCKServerCrashProcedure.java index eec820c..a12b853 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/HBCKServerCrashProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/HBCKServerCrashProcedure.java @@ -30,6 +30,7 @@ import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.master.RegionState; +import org.apache.hadoop.hbase.master.assignment.RegionStateNode; import org.apache.hadoop.hbase.master.assignment.RegionStateStore; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; @@ -168,4 +169,16 @@ public class HBCKServerCrashProcedure extends ServerCrashProcedure { return this.reassigns; } } + + /** + * The RegionStateNode will not have a location if a confirm of an OPEN fails. On fail, + * the RegionStateNode regionLocation is set to null. This is 'looser' than the test done + * in the superclass. The HBCKSCP has been scheduled by an operator via hbck2 probably at the + * behest of a report of an 'Unknown Server' in the 'HBCK Report'. Let the operators operation + * succeed even in case where the region location in the RegionStateNode is null. 
+ */ + @Override + protected boolean isMatchingRegionLocation(RegionStateNode rsn) { +return super.isMatchingRegionLocation(rsn) || rsn.getRegionLocation() == null; + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerCrashProcedure.java
[hbase] 03/03: HBASE-24271 Set values in `conf/hbase-site.xml` that enable running on `LocalFileSystem` out of the box
This is an automated email from the ASF dual-hosted git repository. ndimiduk pushed a commit to branch master in repository https://gitbox.apache.org/repos/asf/hbase.git commit 7e295e767fee85d04db9870bfc516b8ceebdca70 Author: Nick Dimiduk AuthorDate: Mon Apr 27 13:07:29 2020 -0700 HBASE-24271 Set values in `conf/hbase-site.xml` that enable running on `LocalFileSystem` out of the box Simplify the new user experience shipping a configuration that enables a fresh checkout or tarball distribution to run in standalone mode without direct user configuration. This change restores the behavior we had when running on Hadoop 2.8 and earlier. Patch for master includes an update to the book. This change will be omitted when backporting to earlier branches. Signed-off-by: stack Signed-off-by: Josh Elser Signed-off-by: Duo Zhang --- .gitignore | 1 + conf/hbase-site.xml| 34 +- .../apache/hadoop/hbase/util/CommonFSUtils.java| 4 +- src/main/asciidoc/_chapters/getting_started.adoc | 77 ++ 4 files changed, 42 insertions(+), 74 deletions(-) diff --git a/.gitignore b/.gitignore index 5b33192..50dfdfb 100644 --- a/.gitignore +++ b/.gitignore @@ -23,3 +23,4 @@ linklint/ .java-version *.log **/*.log +tmp diff --git a/conf/hbase-site.xml b/conf/hbase-site.xml index c516ac7..48b78ec 100644 --- a/conf/hbase-site.xml +++ b/conf/hbase-site.xml @@ -1,8 +1,7 @@ + + +hbase.cluster.distributed +false + + +hbase.tmp.dir +${env.HBASE_HOME:-.}/tmp + + +hbase.unsafe.stream.capability.enforce +false + diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CommonFSUtils.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CommonFSUtils.java index 29b3e14..f6ff8fa 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CommonFSUtils.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CommonFSUtils.java @@ -1,4 +1,4 @@ -/** +/* * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file @@ -349,7 +349,7 @@ public final class CommonFSUtils { public static FileSystem getWALFileSystem(final Configuration c) throws IOException { Path p = getWALRootDir(c); FileSystem fs = p.getFileSystem(c); -// hadoop-core does fs caching, so need to propogate this if set +// hadoop-core does fs caching, so need to propagate this if set String enforceStreamCapability = c.get(UNSAFE_STREAM_CAPABILITY_ENFORCE); if (enforceStreamCapability != null) { fs.getConf().set(UNSAFE_STREAM_CAPABILITY_ENFORCE, enforceStreamCapability); diff --git a/src/main/asciidoc/_chapters/getting_started.adoc b/src/main/asciidoc/_chapters/getting_started.adoc index e12b7a2..9e4aa8c 100644 --- a/src/main/asciidoc/_chapters/getting_started.adoc +++ b/src/main/asciidoc/_chapters/getting_started.adoc @@ -77,76 +77,12 @@ $ cd hbase-{Version}/ # The java implementation to use. export JAVA_HOME=/usr/jdk64/jdk1.8.0_112 + - -. Edit _conf/hbase-site.xml_, which is the main HBase configuration file. - At this time, you need to specify the directory on the local filesystem where HBase and ZooKeeper write data and acknowledge some risks. - By default, a new directory is created under /tmp. - Many servers are configured to delete the contents of _/tmp_ upon reboot, so you should store the data elsewhere. - The following configuration will store HBase's data in the _hbase_ directory, in the home directory of the user called `testuser`. - Paste the `` tags beneath the `` tags, which should be empty in a new HBase install. -+ -.Example _hbase-site.xml_ for Standalone HBase - -[source,xml] - - - - -hbase.rootdir -file:///home/testuser/hbase - - -hbase.zookeeper.property.dataDir -/home/testuser/zookeeper - - -hbase.unsafe.stream.capability.enforce -false - - Controls whether HBase will check for stream capabilities (hflush/hsync). - - Disable this if you intend to run on LocalFileSystem, denoted by a rootdir - with the 'file://' scheme, but be mindful of the NOTE below. 
- - WARNING: Setting this to false blinds you to potential data loss and - inconsistent system state in the event of process and/or node failures. If - HBase is complaining of an inability to use hsync or hflush it's most - likely not a false positive. - - - - - -+ -You do not need to create the HBase data directory. -HBase will do this for you. If you create the directory, -HBase will attempt to do a migration, which is not what you want. -+ -NOTE: The _hbase.rootdir_ in the above example points to a directory -in the _local filesystem_. The 'file://' prefix is how we denote local -filesystem. You should take the WARNING present in the
[hbase] branch master updated (32e2682 -> 7e295e7)
This is an automated email from the ASF dual-hosted git repository. ndimiduk pushed a change to branch master in repository https://gitbox.apache.org/repos/asf/hbase.git. from 32e2682 HBASE-24368 Let HBCKSCP clear 'Unknown Servers', even if RegionStateNode has RegionLocation == null new a602a00 Revert "HBASE-24106 Update getting started documentation after HBASE-24086" new 8148c58 Revert "HBASE-24086 Disable output stream capability enforcement when running on LocalFileSystem" new 7e295e7 HBASE-24271 Set values in `conf/hbase-site.xml` that enable running on `LocalFileSystem` out of the box The 3 revisions listed above as "new" are entirely new to this repository and will be described in separate emails. The revisions listed as "add" were already present in the repository and have only been added to this reference. Summary of changes: .gitignore | 1 + conf/hbase-site.xml| 34 ++- .../apache/hadoop/hbase/util/CommonFSUtils.java| 11 +-- src/main/asciidoc/_chapters/getting_started.adoc | 110 +++-- 4 files changed, 70 insertions(+), 86 deletions(-)
[hbase] 01/03: Revert "HBASE-24106 Update getting started documentation after HBASE-24086"
This is an automated email from the ASF dual-hosted git repository. ndimiduk pushed a commit to branch master in repository https://gitbox.apache.org/repos/asf/hbase.git commit a602a00b8dced35667856a106a382fe1319cbd19 Author: Nick Dimiduk AuthorDate: Mon Apr 27 11:13:51 2020 -0700 Revert "HBASE-24106 Update getting started documentation after HBASE-24086" This reverts commit 7de861bb839e05dfbf5709c55387c3be6dd7344b. --- src/main/asciidoc/_chapters/getting_started.adoc | 127 +-- 1 file changed, 75 insertions(+), 52 deletions(-) diff --git a/src/main/asciidoc/_chapters/getting_started.adoc b/src/main/asciidoc/_chapters/getting_started.adoc index c092ebc..e12b7a2 100644 --- a/src/main/asciidoc/_chapters/getting_started.adoc +++ b/src/main/asciidoc/_chapters/getting_started.adoc @@ -55,67 +55,85 @@ See <> for information about supported JDK versions. . Choose a download site from this list of link:https://www.apache.org/dyn/closer.lua/hbase/[Apache Download Mirrors]. Click on the suggested top link. This will take you to a mirror of _HBase Releases_. - Click on the folder named _stable_ and then download the binary file that looks like - _hbase--bin.tar.gz_. + Click on the folder named _stable_ and then download the binary file that ends in _.tar.gz_ to your local filesystem. + Do not download the file ending in _src.tar.gz_ for now. -. Extract the downloaded file and change to the newly-created directory. +. Extract the downloaded file, and change to the newly-created directory. + +[source,subs="attributes"] -$ tar xzvf hbase--bin.tar.gz -$ cd hbase-/ + +$ tar xzvf hbase-{Version}-bin.tar.gz +$ cd hbase-{Version}/ -. Set the `JAVA_HOME` environment variable in _conf/hbase-env.sh_. - First, locate the installation of `java` on your machine. On Unix systems, you can use the - _whereis java_ command. 
Once you have the location, edit _conf/hbase-env.sh_ file, found inside - the extracted _hbase-_ directory, uncomment the line starting with `#export JAVA_HOME=`, - and then set it to your Java installation path. +. You must set the `JAVA_HOME` environment variable before starting HBase. + To make this easier, HBase lets you set it within the _conf/hbase-env.sh_ file. You must locate where Java is + installed on your machine, and one way to find this is by using the _whereis java_ command. Once you have the location, + edit the _conf/hbase-env.sh_ file and uncomment the line starting with _#export JAVA_HOME=_, and then set it to your Java installation path. + -.Example extract from _conf/hbase-env.sh_ where `JAVA_HOME` is set +.Example extract from _hbase-env.sh_ where _JAVA_HOME_ is set # Set environment variables here. # The java implementation to use. export JAVA_HOME=/usr/jdk64/jdk1.8.0_112 + -. Optionally set the <> property in _conf/hbase-site.xml_. - At this time, you may consider changing the location on the local filesystem where HBase writes - its application data and the data written by its embedded ZooKeeper instance. By default, HBase - uses paths under <> for these directories. -+ -NOTE: On most systems, this is a path created under _/tmp_. Many system periodically delete the - contents of _/tmp_. If you start working with HBase in this way, and then return after the - cleanup operation takes place, you're likely to find strange errors. The following - configuration will place HBase's runtime data in a _tmp_ directory found inside the extracted - _hbase-_ directory, where it will be safe from this periodic cleanup. -+ -Open _conf/hbase-site.xml_ and paste the `` tags between the empty `` -tags. +. Edit _conf/hbase-site.xml_, which is the main HBase configuration file. + At this time, you need to specify the directory on the local filesystem where HBase and ZooKeeper write data and acknowledge some risks. 
+ By default, a new directory is created under /tmp. + Many servers are configured to delete the contents of _/tmp_ upon reboot, so you should store the data elsewhere. + The following configuration will store HBase's data in the _hbase_ directory, in the home directory of the user called `testuser`. + Paste the `` tags beneath the `` tags, which should be empty in a new HBase install. + .Example _hbase-site.xml_ for Standalone HBase [source,xml] + -hbase.tmp.dir -tmp +hbase.rootdir +file:///home/testuser/hbase + + +hbase.zookeeper.property.dataDir +/home/testuser/zookeeper + + +hbase.unsafe.stream.capability.enforce +false + + Controls whether HBase will check for stream capabilities (hflush/hsync). + + Disable this if you intend to run on LocalFileSystem, denoted by a rootdir + with the 'file://' scheme, but be mindful of the NOTE below. + + WARNING: Setting this to false blinds you to potential data loss and + inconsistent system state in the event of process and/or node failures. If
[hbase] 02/03: Revert "HBASE-24086 Disable output stream capability enforcement when running on LocalFileSystem"
This is an automated email from the ASF dual-hosted git repository. ndimiduk pushed a commit to branch master in repository https://gitbox.apache.org/repos/asf/hbase.git commit 8148c5858b524ce829d1b30f3914ffddf6a08804 Author: Nick Dimiduk AuthorDate: Mon Apr 27 11:14:26 2020 -0700 Revert "HBASE-24086 Disable output stream capability enforcement when running on LocalFileSystem" This reverts commit 10cbb3fb7310e88de3814396bd7766303c2e65aa. --- .../java/org/apache/hadoop/hbase/util/CommonFSUtils.java | 15 --- 1 file changed, 4 insertions(+), 11 deletions(-) diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CommonFSUtils.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CommonFSUtils.java index 8b6ba2d..29b3e14 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CommonFSUtils.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CommonFSUtils.java @@ -1,4 +1,4 @@ -/* +/** * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file @@ -28,12 +28,12 @@ import java.util.List; import java.util.Locale; import java.util.Map; import java.util.concurrent.ConcurrentHashMap; +import org.apache.hadoop.HadoopIllegalArgumentException; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FSDataOutputStreamBuilder; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.LocalFileSystem; import org.apache.hadoop.fs.LocatedFileStatus; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.PathFilter; @@ -41,6 +41,7 @@ import org.apache.hadoop.fs.RemoteIterator; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.ipc.RemoteException; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -348,19 +349,11 @@ public final class CommonFSUtils { public static FileSystem getWALFileSystem(final Configuration c) throws IOException { Path p = getWALRootDir(c); FileSystem fs = p.getFileSystem(c); -// hadoop-core does fs caching, so need to propagate this if set +// hadoop-core does fs caching, so need to propogate this if set String enforceStreamCapability = c.get(UNSAFE_STREAM_CAPABILITY_ENFORCE); if (enforceStreamCapability != null) { fs.getConf().set(UNSAFE_STREAM_CAPABILITY_ENFORCE, enforceStreamCapability); } -if (fs instanceof LocalFileSystem) { - // running on LocalFileSystem, which does not support the required capabilities `HSYNC` - // and `HFLUSH`. disable enforcement. - final boolean value = false; - LOG.warn("Cannot enforce durability guarantees while running on {}. Setting {}={} for" -+ " this FileSystem.", fs.getUri(), UNSAFE_STREAM_CAPABILITY_ENFORCE, value); - fs.getConf().setBoolean(UNSAFE_STREAM_CAPABILITY_ENFORCE, value); -} return fs; }
[hbase] 01/02: Revert "HBASE-24086 Disable output stream capability enforcement when running on LocalFileSystem"
This is an automated email from the ASF dual-hosted git repository. ndimiduk pushed a commit to branch branch-2.3 in repository https://gitbox.apache.org/repos/asf/hbase.git commit 888eaa094dc191490b08809e76502f771e0d288b Author: Nick Dimiduk AuthorDate: Thu May 14 14:06:30 2020 -0700 Revert "HBASE-24086 Disable output stream capability enforcement when running on LocalFileSystem" This reverts commit 5de9d7120012938361972fb0b4ee56ca99e9cc6f. --- .../java/org/apache/hadoop/hbase/util/CommonFSUtils.java | 15 --- 1 file changed, 4 insertions(+), 11 deletions(-) diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CommonFSUtils.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CommonFSUtils.java index 2e21605..a8c24c5 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CommonFSUtils.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CommonFSUtils.java @@ -1,4 +1,4 @@ -/* +/** * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file @@ -28,12 +28,12 @@ import java.util.List; import java.util.Locale; import java.util.Map; import java.util.concurrent.ConcurrentHashMap; +import org.apache.hadoop.HadoopIllegalArgumentException; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FSDataOutputStreamBuilder; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.LocalFileSystem; import org.apache.hadoop.fs.LocatedFileStatus; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.PathFilter; @@ -41,6 +41,7 @@ import org.apache.hadoop.fs.RemoteIterator; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.ipc.RemoteException; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -349,19 +350,11 @@ public final class CommonFSUtils { public static FileSystem getWALFileSystem(final Configuration c) throws IOException { Path p = getWALRootDir(c); FileSystem fs = p.getFileSystem(c); -// hadoop-core does fs caching, so need to propagate this if set +// hadoop-core does fs caching, so need to propogate this if set String enforceStreamCapability = c.get(UNSAFE_STREAM_CAPABILITY_ENFORCE); if (enforceStreamCapability != null) { fs.getConf().set(UNSAFE_STREAM_CAPABILITY_ENFORCE, enforceStreamCapability); } -if (fs instanceof LocalFileSystem) { - // running on LocalFileSystem, which does not support the required capabilities `HSYNC` - // and `HFLUSH`. disable enforcement. - final boolean value = false; - LOG.warn("Cannot enforce durability guarantees while running on {}. Setting {}={} for" -+ " this FileSystem.", fs.getUri(), UNSAFE_STREAM_CAPABILITY_ENFORCE, value); - fs.getConf().setBoolean(UNSAFE_STREAM_CAPABILITY_ENFORCE, value); -} return fs; }
[hbase] branch branch-2.3 updated (595b861 -> 9f25673)
This is an automated email from the ASF dual-hosted git repository. ndimiduk pushed a change to branch branch-2.3 in repository https://gitbox.apache.org/repos/asf/hbase.git. from 595b861 HBASE-24368 Let HBCKSCP clear 'Unknown Servers', even if RegionStateNode has RegionLocation == null new 888eaa0 Revert "HBASE-24086 Disable output stream capability enforcement when running on LocalFileSystem" new 9f25673 HBASE-24271 Set values in `conf/hbase-site.xml` that enable running on `LocalFileSystem` out of the box The 2 revisions listed above as "new" are entirely new to this repository and will be described in separate emails. The revisions listed as "add" were already present in the repository and have only been added to this reference. Summary of changes: .gitignore | 1 + conf/hbase-site.xml| 34 +- .../apache/hadoop/hbase/util/CommonFSUtils.java| 11 +--- src/main/asciidoc/_chapters/getting_started.adoc | 77 ++ 4 files changed, 42 insertions(+), 81 deletions(-)
[hbase] 02/02: HBASE-24271 Set values in `conf/hbase-site.xml` that enable running on `LocalFileSystem` out of the box
This is an automated email from the ASF dual-hosted git repository. ndimiduk pushed a commit to branch branch-2.3 in repository https://gitbox.apache.org/repos/asf/hbase.git commit 9f25673bb5780cd4483ad4b1cb1a7e4e35e44c2d Author: Nick Dimiduk AuthorDate: Mon Apr 27 13:07:29 2020 -0700 HBASE-24271 Set values in `conf/hbase-site.xml` that enable running on `LocalFileSystem` out of the box Simplify the new user experience shipping a configuration that enables a fresh checkout or tarball distribution to run in standalone mode without direct user configuration. This change restores the behavior we had when running on Hadoop 2.8 and earlier. Patch for master includes an update to the book. This change will be omitted when backporting to earlier branches. Signed-off-by: stack Signed-off-by: Josh Elser Signed-off-by: Duo Zhang --- .gitignore | 1 + conf/hbase-site.xml| 34 +- .../apache/hadoop/hbase/util/CommonFSUtils.java| 4 +- src/main/asciidoc/_chapters/getting_started.adoc | 77 ++ 4 files changed, 42 insertions(+), 74 deletions(-) diff --git a/.gitignore b/.gitignore index 0ae87d9..0b883e0 100644 --- a/.gitignore +++ b/.gitignore @@ -21,3 +21,4 @@ linklint/ .checkstyle **/.checkstyle .java-version +tmp diff --git a/conf/hbase-site.xml b/conf/hbase-site.xml index c516ac7..20c62f7 100644 --- a/conf/hbase-site.xml +++ b/conf/hbase-site.xml @@ -1,8 +1,7 @@ + + +hbase.cluster.distributed +false + + +hbase.tmp.dir +./tmp + + +hbase.unsafe.stream.capability.enforce +false + diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CommonFSUtils.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CommonFSUtils.java index a8c24c5..d46352d 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CommonFSUtils.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CommonFSUtils.java @@ -1,4 +1,4 @@ -/** +/* * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file @@ -350,7 +350,7 @@ public final class CommonFSUtils { public static FileSystem getWALFileSystem(final Configuration c) throws IOException { Path p = getWALRootDir(c); FileSystem fs = p.getFileSystem(c); -// hadoop-core does fs caching, so need to propogate this if set +// hadoop-core does fs caching, so need to propagate this if set String enforceStreamCapability = c.get(UNSAFE_STREAM_CAPABILITY_ENFORCE); if (enforceStreamCapability != null) { fs.getConf().set(UNSAFE_STREAM_CAPABILITY_ENFORCE, enforceStreamCapability); diff --git a/src/main/asciidoc/_chapters/getting_started.adoc b/src/main/asciidoc/_chapters/getting_started.adoc index 84ebcaa..e50ea6b 100644 --- a/src/main/asciidoc/_chapters/getting_started.adoc +++ b/src/main/asciidoc/_chapters/getting_started.adoc @@ -80,76 +80,12 @@ $ cd hbase-{Version}/ JAVA_HOME=/usr + - -. Edit _conf/hbase-site.xml_, which is the main HBase configuration file. - At this time, you need to specify the directory on the local filesystem where HBase and ZooKeeper write data and acknowledge some risks. - By default, a new directory is created under /tmp. - Many servers are configured to delete the contents of _/tmp_ upon reboot, so you should store the data elsewhere. - The following configuration will store HBase's data in the _hbase_ directory, in the home directory of the user called `testuser`. - Paste the `` tags beneath the `` tags, which should be empty in a new HBase install. -+ -.Example _hbase-site.xml_ for Standalone HBase - -[source,xml] - - - - -hbase.rootdir -file:///home/testuser/hbase - - -hbase.zookeeper.property.dataDir -/home/testuser/zookeeper - - -hbase.unsafe.stream.capability.enforce -false - - Controls whether HBase will check for stream capabilities (hflush/hsync). - - Disable this if you intend to run on LocalFileSystem, denoted by a rootdir - with the 'file://' scheme, but be mindful of the NOTE below. 
- - WARNING: Setting this to false blinds you to potential data loss and - inconsistent system state in the event of process and/or node failures. If - HBase is complaining of an inability to use hsync or hflush it's most - likely not a false positive. - - - - - -+ -You do not need to create the HBase data directory. -HBase will do this for you. If you create the directory, -HBase will attempt to do a migration, which is not what you want. -+ -NOTE: The _hbase.rootdir_ in the above example points to a directory -in the _local filesystem_. The 'file://' prefix is how we denote local -filesystem. You should take the WARNING present in the configuration example -to heart. In standalone mode HBase makes use of
[hbase] branch branch-1 updated: HBASE-24350: Extending and Fixing HBaseTable level replication metrics (#1704)
This is an automated email from the ASF dual-hosted git repository. apurtell pushed a commit to branch branch-1 in repository https://gitbox.apache.org/repos/asf/hbase.git The following commit(s) were added to refs/heads/branch-1 by this push: new fbe5e68 HBASE-24350: Extending and Fixing HBaseTable level replication metrics (#1704) fbe5e68 is described below commit fbe5e6825138c406445c211372487e73ad888908 Author: Sandeep Pal <50725353+sandeepvina...@users.noreply.github.com> AuthorDate: Thu May 14 10:34:51 2020 -0700 HBASE-24350: Extending and Fixing HBaseTable level replication metrics (#1704) branch-1 backport Signed-off-by: Wellington Chevreuil Signed-off-by: Andrew Purtell Co-Authored-by: Andrew Purtell --- .../MetricsReplicationSourceFactory.java | 1 + ...ory.java => MetricsReplicationTableSource.java} | 12 +- .../MetricsReplicationSourceFactoryImpl.java | 4 + .../MetricsReplicationTableSourceImpl.java | 138 + .../replication/regionserver/MetricsSource.java| 59 +++-- .../regionserver/ReplicationSource.java| 8 +- .../ReplicationSourceWALReaderThread.java | 31 +++-- .../hbase/replication/TestReplicationEndpoint.java | 61 ++--- 8 files changed, 263 insertions(+), 51 deletions(-) diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSourceFactory.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSourceFactory.java index 0e1c5cc..20ba3c9 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSourceFactory.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSourceFactory.java @@ -21,5 +21,6 @@ package org.apache.hadoop.hbase.replication.regionserver; public interface MetricsReplicationSourceFactory { public MetricsReplicationSinkSource getSink(); public MetricsReplicationSourceSource getSource(String id); + public 
MetricsReplicationTableSource getTableSource(String tableName); public MetricsReplicationSourceSource getGlobalSource(); } diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSourceFactory.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationTableSource.java similarity index 76% copy from hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSourceFactory.java copy to hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationTableSource.java index 0e1c5cc..7d4f754 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSourceFactory.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationTableSource.java @@ -18,8 +18,12 @@ package org.apache.hadoop.hbase.replication.regionserver; -public interface MetricsReplicationSourceFactory { - public MetricsReplicationSinkSource getSink(); - public MetricsReplicationSourceSource getSource(String id); - public MetricsReplicationSourceSource getGlobalSource(); +import org.apache.hadoop.hbase.metrics.BaseSource; + +public interface MetricsReplicationTableSource extends BaseSource { + void setLastShippedAge(long age); + void incrShippedBytes(long size); + long getShippedBytes(); + void clear(); + long getLastShippedAge(); } diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSourceFactoryImpl.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSourceFactoryImpl.java index b07790f..c64ac58 100644 --- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSourceFactoryImpl.java +++ 
b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSourceFactoryImpl.java @@ -35,4 +35,8 @@ public class MetricsReplicationSourceFactoryImpl implements MetricsReplicationSo @Override public MetricsReplicationSourceSource getGlobalSource() { return new MetricsReplicationGlobalSourceSource(SourceHolder.INSTANCE.source); } + + @Override public MetricsReplicationTableSource getTableSource(String tableName) { +return new MetricsReplicationTableSourceImpl(SourceHolder.INSTANCE.source, tableName); + } } diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationTableSourceImpl.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationTableSourceImpl.java new file mode 100644 index 000..5fc1493 --- /dev/null +++
[hbase-site] branch asf-site updated: INFRA-10751 Empty commit
This is an automated email from the ASF dual-hosted git repository. git-site-role pushed a commit to branch asf-site in repository https://gitbox.apache.org/repos/asf/hbase-site.git The following commit(s) were added to refs/heads/asf-site by this push: new f47b78c INFRA-10751 Empty commit f47b78c is described below commit f47b78c15ad277b8a30aaba53c41b072fe32f634 Author: jenkins AuthorDate: Thu May 14 14:54:28 2020 +0000 INFRA-10751 Empty commit
[hbase] branch master updated (5e32e08 -> 2e5a664)
This is an automated email from the ASF dual-hosted git repository. apurtell pushed a change to branch master in repository https://gitbox.apache.org/repos/asf/hbase.git. from 5e32e08 HBASE-24164 Retain the ReadRequests and WriteRequests of region on we… (#1500) add 2e5a664 HBASE-24350: Extending and Fixing HBaseTable level replication metrics (#1704) No new revisions were added by this update. Summary of changes: .../MetricsReplicationSourceFactory.java | 1 + .../MetricsReplicationSourceFactoryImpl.java | 4 + ...ory.java => MetricsReplicationTableSource.java} | 12 +- .../MetricsReplicationTableSourceImpl.java | 134 + .../replication/regionserver/MetricsSource.java| 36 +- .../regionserver/ReplicationSource.java| 2 +- .../regionserver/ReplicationSourceShipper.java | 8 +- .../regionserver/ReplicationSourceWALReader.java | 6 +- .../replication/regionserver/WALEntryBatch.java| 24 ++-- .../hbase/replication/TestReplicationEndpoint.java | 52 ++-- 10 files changed, 242 insertions(+), 37 deletions(-) copy hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/{MetricsReplicationSourceFactory.java => MetricsReplicationTableSource.java} (78%) create mode 100644 hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationTableSourceImpl.java