This is an automated email from the ASF dual-hosted git repository.
zhangduo pushed a commit to branch branch-2.6
in repository https://gitbox.apache.org/repos/asf/hbase.git
The following commit(s) were added to refs/heads/branch-2.6 by this push:
new c52b0a07fd2 HBASE-29137 Add basic config type validation (branch-2)
(#6799)
c52b0a07fd2 is described below
commit c52b0a07fd23f6d2c97343410db9a83cee57ceeb
Author: Junegunn Choi <[email protected]>
AuthorDate: Fri Apr 11 23:53:48 2025 +0900
HBASE-29137 Add basic config type validation (branch-2) (#6799)
To make ALTER operations safer.
Signed-off-by: Duo Zhang <[email protected]>
Signed-off-by: Nihal Jain <[email protected]>
Reviewed-by: guluo <[email protected]>
(cherry picked from commit 5f59799739ea30f9a87b36ed360e7e4816877dc2)
---
.../java/org/apache/hadoop/hbase/HConstants.java | 23 +--
.../org/apache/hadoop/hbase/conf/ConfigKey.java | 162 +++++++++++++++++++++
.../apache/hadoop/hbase/conf/TestConfigKey.java | 127 ++++++++++++++++
.../ConstantSizeRegionSplitPolicy.java | 6 +-
.../hbase/regionserver/DefaultStoreEngine.java | 9 +-
.../apache/hadoop/hbase/regionserver/HRegion.java | 8 +-
.../apache/hadoop/hbase/regionserver/HStore.java | 9 +-
.../hadoop/hbase/regionserver/StoreEngine.java | 4 +-
.../hadoop/hbase/regionserver/StoreScanner.java | 6 +-
.../compactions/CompactionConfiguration.java | 46 +++---
.../hadoop/hbase/util/TableDescriptorChecker.java | 9 ++
.../hbase/util/TestTableDescriptorChecker.java | 86 +++++++++++
12 files changed, 453 insertions(+), 42 deletions(-)
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
index 12f8bc6df03..14d7073d5da 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
@@ -27,6 +27,7 @@ import java.util.List;
import java.util.UUID;
import java.util.regex.Pattern;
import org.apache.commons.lang3.ArrayUtils;
+import org.apache.hadoop.hbase.conf.ConfigKey;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.yetus.audience.InterfaceAudience;
@@ -351,7 +352,8 @@ public final class HConstants {
public static final int DEFAULT_VERSION_FILE_WRITE_ATTEMPTS = 3;
/** Parameter name and default value for how often a region should perform a
major compaction */
- public static final String MAJOR_COMPACTION_PERIOD =
"hbase.hregion.majorcompaction";
+ public static final String MAJOR_COMPACTION_PERIOD =
+ ConfigKey.LONG("hbase.hregion.majorcompaction");
public static final long DEFAULT_MAJOR_COMPACTION_PERIOD = 1000 * 60 * 60 *
24 * 7; // 7 days
/**
@@ -360,16 +362,17 @@ public final class HConstants {
* either side of {@link HConstants#MAJOR_COMPACTION_PERIOD}. Default to 0.5
so jitter has us fall
* evenly either side of when the compaction should run.
*/
- public static final String MAJOR_COMPACTION_JITTER =
"hbase.hregion.majorcompaction.jitter";
+ public static final String MAJOR_COMPACTION_JITTER =
+ ConfigKey.FLOAT("hbase.hregion.majorcompaction.jitter");
public static final float DEFAULT_MAJOR_COMPACTION_JITTER = 0.50F;
/** Parameter name for the maximum batch of KVs to be used in flushes and
compactions */
- public static final String COMPACTION_KV_MAX =
"hbase.hstore.compaction.kv.max";
+ public static final String COMPACTION_KV_MAX =
ConfigKey.INT("hbase.hstore.compaction.kv.max");
public static final int COMPACTION_KV_MAX_DEFAULT = 10;
/** Parameter name for the scanner size limit to be used in compactions */
public static final String COMPACTION_SCANNER_SIZE_MAX =
- "hbase.hstore.compaction.scanner.size.limit";
+ ConfigKey.LONG("hbase.hstore.compaction.scanner.size.limit");
public static final long COMPACTION_SCANNER_SIZE_MAX_DEFAULT = 10 * 1024 *
1024L; // 10MB
/** Parameter name for HBase instance root directory */
@@ -429,7 +432,7 @@ public final class HConstants {
public static final String HREGION_COMPACTIONDIR_NAME = "compaction.dir";
/** Conf key for the max file size after which we split the region */
- public static final String HREGION_MAX_FILESIZE =
"hbase.hregion.max.filesize";
+ public static final String HREGION_MAX_FILESIZE =
ConfigKey.LONG("hbase.hregion.max.filesize");
/** Default maximum file size */
public static final long DEFAULT_MAX_FILE_SIZE = 10 * 1024 * 1024 * 1024L;
@@ -454,7 +457,7 @@ public final class HConstants {
* The max number of threads used for opening and closing stores or store
files in parallel
*/
public static final String HSTORE_OPEN_AND_CLOSE_THREADS_MAX =
- "hbase.hstore.open.and.close.threads.max";
+ ConfigKey.INT("hbase.hstore.open.and.close.threads.max");
/**
* The default number for the max number of threads used for opening and
closing stores or store
@@ -468,7 +471,7 @@ public final class HConstants {
* update traffic.
*/
public static final String HREGION_MEMSTORE_BLOCK_MULTIPLIER =
- "hbase.hregion.memstore.block.multiplier";
+ ConfigKey.INT("hbase.hregion.memstore.block.multiplier", v -> v > 0);
/**
* Default value for hbase.hregion.memstore.block.multiplier
@@ -476,7 +479,8 @@ public final class HConstants {
public static final int DEFAULT_HREGION_MEMSTORE_BLOCK_MULTIPLIER = 4;
/** Conf key for the memstore size at which we flush the memstore */
- public static final String HREGION_MEMSTORE_FLUSH_SIZE =
"hbase.hregion.memstore.flush.size";
+ public static final String HREGION_MEMSTORE_FLUSH_SIZE =
+ ConfigKey.LONG("hbase.hregion.memstore.flush.size", v -> v > 0);
public static final String HREGION_EDITS_REPLAY_SKIP_ERRORS =
"hbase.hregion.edits.replay.skip.errors";
@@ -493,7 +497,8 @@ public final class HConstants {
public static final String CLUSTER_ID_DEFAULT = "default-cluster";
/** Parameter name for # days to keep MVCC values during a major compaction
*/
- public static final String KEEP_SEQID_PERIOD =
"hbase.hstore.compaction.keep.seqId.period";
+ public static final String KEEP_SEQID_PERIOD =
+ ConfigKey.INT("hbase.hstore.compaction.keep.seqId.period");
/** At least to keep MVCC values in hfiles for 5 days */
public static final int MIN_KEEP_SEQID_PERIOD = 5;
diff --git
a/hbase-common/src/main/java/org/apache/hadoop/hbase/conf/ConfigKey.java
b/hbase-common/src/main/java/org/apache/hadoop/hbase/conf/ConfigKey.java
new file mode 100644
index 00000000000..45eb99f6a95
--- /dev/null
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/conf/ConfigKey.java
@@ -0,0 +1,162 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.conf;
+
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.function.Predicate;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.yetus.audience.InterfaceAudience;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Utility class for basic validation of configuration values.
+ */
[email protected]
+public final class ConfigKey {
+ private static final Logger LOG = LoggerFactory.getLogger(ConfigKey.class);
+
+ private ConfigKey() {
+ }
+
+ @FunctionalInterface
+ private interface Validator {
+ void validate(Configuration conf);
+ }
+
+ // Map of configuration keys to validators
+ private static final Map<String, Validator> validators = new
ConcurrentHashMap<>();
+
+ /**
+ * Registers the configuration key that expects an integer.
+ * @param key configuration key
+ * @param predicates additional predicates to validate the value
+ * @return the key
+ */
+ @SafeVarargs
+ public static String INT(String key, Predicate<Integer>... predicates) {
+ return register(key,
+ conf -> validateNumeric(key, "an integer", () -> conf.getInt(key, 0),
predicates));
+ }
+
+ /**
+ * Registers the configuration key that expects a long.
+ * @param key configuration key
+ * @param predicates additional predicates to validate the value
+ * @return the key
+ */
+ @SafeVarargs
+ public static String LONG(String key, Predicate<Long>... predicates) {
+ return register(key,
+ conf -> validateNumeric(key, "a long", () -> conf.getLong(key, 0),
predicates));
+ }
+
+ /**
+ * Registers the configuration key that expects a float.
+ * @param key configuration key
+ * @param predicates additional predicates to validate the value
+ * @return the key
+ */
+ @SafeVarargs
+ public static String FLOAT(String key, Predicate<Float>... predicates) {
+ return register(key,
+ conf -> validateNumeric(key, "a float", () -> conf.getFloat(key, 0),
predicates));
+ }
+
+ /**
+ * Registers the configuration key that expects a double.
+ * @param key configuration key
+ * @param predicates additional predicates to validate the value
+ * @return the key
+ */
+ @SafeVarargs
+ public static String DOUBLE(String key, Predicate<Double>... predicates) {
+ return register(key,
+ conf -> validateNumeric(key, "a double", () -> conf.getDouble(key, 0),
predicates));
+ }
+
+ /**
+ * Registers the configuration key that expects a class.
+ * @param key configuration key
+ * @param expected the expected class or interface the value should implement
+ * @return the key
+ */
+ public static <T> String CLASS(String key, Class<T> expected) {
+ return register(key, conf -> {
+ String value = conf.get(key);
+ try {
+ if (!expected.isAssignableFrom(Class.forName(value))) {
+ throw new IllegalArgumentException(
+ String.format("%s ('%s') is not compatible to %s.", value, key,
expected.getName()));
+ }
+ } catch (ClassNotFoundException e) {
+ throw new IllegalArgumentException(String.format("'%s' must be a
class.", key), e);
+ }
+ });
+ }
+
+ /**
+ * Registers the configuration key that expects a boolean value. We actually
don't register a
+ * validator, because {@link Configuration#getBoolean} doesn't throw an
exception even if the
+ * value is not a boolean. So this is only for documentation purposes.
+ * @param key configuration key
+ * @return the key
+ */
+ public static String BOOLEAN(String key) {
+ return key;
+ }
+
+ /**
+ * Validates the configuration.
+ * @param conf a configuration to validate
+ */
+ public static void validate(Configuration conf) {
+ conf.iterator().forEachRemaining(entry -> {
+ Validator validator = validators.get(entry.getKey());
+ if (validator != null) {
+ validator.validate(conf);
+ }
+ });
+ }
+
+ @FunctionalInterface
+ private interface NumberGetter<T> {
+ T get() throws NumberFormatException;
+ }
+
+ private static <T> void validateNumeric(String name, String expected,
NumberGetter<T> getter,
+ Predicate<T>... predicates) {
+ try {
+ T value = getter.get();
+ for (Predicate<T> predicate : predicates) {
+ if (!predicate.test(value)) {
+ throw new IllegalArgumentException("Invalid value for '" + name +
"': " + value + ".");
+ }
+ }
+ } catch (NumberFormatException e) {
+ throw new IllegalArgumentException(String.format("'%s' must be %s.",
name, expected), e);
+ }
+ }
+
+ private static String register(String key, Validator validator) {
+ LOG.debug("Registering config validator for {}", key);
+ validators.put(key, validator);
+ return key;
+ }
+}
diff --git
a/hbase-common/src/test/java/org/apache/hadoop/hbase/conf/TestConfigKey.java
b/hbase-common/src/test/java/org/apache/hadoop/hbase/conf/TestConfigKey.java
new file mode 100644
index 00000000000..eedf0964f2a
--- /dev/null
+++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/conf/TestConfigKey.java
@@ -0,0 +1,127 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.conf;
+
+import static org.junit.Assert.fail;
+
+import java.util.UUID;
+import java.util.function.Consumer;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.CompoundConfiguration;
+import org.apache.hadoop.hbase.HBaseClassTestRule;
+import org.apache.hadoop.hbase.testclassification.MiscTests;
+import org.apache.hadoop.hbase.testclassification.SmallTests;
+import org.junit.ClassRule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+@Category({ MiscTests.class, SmallTests.class })
+public class TestConfigKey {
+ @ClassRule
+ public static final HBaseClassTestRule CLASS_RULE =
+ HBaseClassTestRule.forClass(TestConfigKey.class);
+
+ private interface Interface {
+ }
+
+ private class Class implements Interface {
+ }
+
+ private void assertThrows(Runnable r) {
+ try {
+ r.run();
+ fail("validation should have failed");
+ } catch (IllegalArgumentException e) {
+ // Expected
+ }
+ }
+
+ private void assertPasses(Configuration conf, Consumer<Configuration> block)
{
+ Configuration copy = new CompoundConfiguration().add(conf);
+ block.accept(copy);
+ ConfigKey.validate(copy);
+ }
+
+ private void assertThrows(Configuration conf, Consumer<Configuration> block)
{
+ Configuration copy = new CompoundConfiguration().add(conf);
+ block.accept(copy);
+ assertThrows(() -> ConfigKey.validate(copy));
+ }
+
+ @Test
+ public void testConfigKey() {
+ Configuration conf = new CompoundConfiguration();
+
+ String intKey = UUID.randomUUID().toString();
+ ConfigKey.INT(intKey);
+ conf.set(intKey, "1");
+
+ String longKey = UUID.randomUUID().toString();
+ ConfigKey.LONG(longKey);
+ conf.set(longKey, "1");
+
+ String floatKey = UUID.randomUUID().toString();
+ ConfigKey.FLOAT(floatKey);
+ conf.set(floatKey, "1.0");
+
+ String doubleKey = UUID.randomUUID().toString();
+ ConfigKey.DOUBLE(doubleKey);
+ conf.set(doubleKey, "1.0");
+
+ String classKey = UUID.randomUUID().toString();
+ ConfigKey.CLASS(classKey, Interface.class);
+ conf.set(classKey, Class.class.getName());
+
+ String booleanKey = UUID.randomUUID().toString();
+ ConfigKey.BOOLEAN(booleanKey);
+ conf.set(booleanKey, "true");
+
+ // This should pass
+ ConfigKey.validate(conf);
+
+ // Add a predicate to make the validation fail
+ ConfigKey.INT(intKey, i -> i < 0);
+ assertThrows(() -> ConfigKey.validate(conf));
+
+ // New predicates to make the validation pass
+ ConfigKey.INT(intKey, i -> i > 0, i -> i < 2);
+ ConfigKey.validate(conf);
+
+ // Remove the predicate
+ ConfigKey.INT(intKey);
+
+ // Passing examples
+ assertPasses(conf, copy -> copy.set(intKey,
String.valueOf(Integer.MAX_VALUE)));
+ assertPasses(conf, copy -> copy.set(longKey,
String.valueOf(Long.MAX_VALUE)));
+ assertPasses(conf, copy -> copy.set(floatKey,
String.valueOf(Float.MAX_VALUE)));
+ assertPasses(conf, copy -> copy.set(doubleKey,
String.valueOf(Double.MAX_VALUE)));
+
+ // Because Configuration#getBoolean doesn't throw an exception on invalid
values, we don't
+ // validate the value here
+ assertPasses(conf, copy -> copy.set(booleanKey, "yeah?"));
+
+ // Failing examples
+ assertThrows(conf, copy -> copy.set(intKey, "x"));
+ assertThrows(conf, copy -> copy.set(longKey, Long.MAX_VALUE + "0"));
+ assertThrows(conf, copy -> copy.set(longKey, "x"));
+ assertThrows(conf, copy -> copy.set(floatKey, "x"));
+ assertThrows(conf, copy -> copy.set(doubleKey, "x"));
+ assertThrows(conf, copy -> copy.set(classKey, "NoSuchClass"));
+ assertThrows(conf, copy -> copy.set(classKey, getClass().getName()));
+ }
+}
diff --git
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ConstantSizeRegionSplitPolicy.java
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ConstantSizeRegionSplitPolicy.java
index be8fbb1b320..334f242d161 100644
---
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ConstantSizeRegionSplitPolicy.java
+++
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ConstantSizeRegionSplitPolicy.java
@@ -22,6 +22,7 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseInterfaceAudience;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.client.TableDescriptor;
+import org.apache.hadoop.hbase.conf.ConfigKey;
import org.apache.hadoop.hbase.procedure2.util.StringUtils;
import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger;
@@ -42,6 +43,9 @@ public class ConstantSizeRegionSplitPolicy extends
RegionSplitPolicy {
private double jitterRate;
protected boolean overallHRegionFiles;
+ public static final String MAX_FILESIZE_JITTER_KEY =
+ ConfigKey.DOUBLE("hbase.hregion.max.filesize.jitter");
+
@Override
public String toString() {
return "ConstantSizeRegionSplitPolicy{" + "desiredMaxFileSize=" +
desiredMaxFileSize
@@ -62,7 +66,7 @@ public class ConstantSizeRegionSplitPolicy extends
RegionSplitPolicy {
}
this.overallHRegionFiles =
conf.getBoolean(HConstants.OVERALL_HREGION_FILES,
HConstants.DEFAULT_OVERALL_HREGION_FILES);
- double jitter = conf.getDouble("hbase.hregion.max.filesize.jitter", 0.25D);
+ double jitter = conf.getDouble(MAX_FILESIZE_JITTER_KEY, 0.25D);
this.jitterRate = (ThreadLocalRandom.current().nextFloat() - 0.5D) *
jitter;
long jitterValue = (long) (this.desiredMaxFileSize * this.jitterRate);
// Default jitter is ~12% +/-. Make sure the long value won't overflow
with jitter
diff --git
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultStoreEngine.java
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultStoreEngine.java
index 7b095596a3d..9e57916a263 100644
---
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultStoreEngine.java
+++
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultStoreEngine.java
@@ -23,7 +23,10 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.CellComparator;
import org.apache.hadoop.hbase.HBaseInterfaceAudience;
+import org.apache.hadoop.hbase.conf.ConfigKey;
import org.apache.hadoop.hbase.regionserver.compactions.CompactionContext;
+import org.apache.hadoop.hbase.regionserver.compactions.CompactionPolicy;
+import org.apache.hadoop.hbase.regionserver.compactions.Compactor;
import org.apache.hadoop.hbase.regionserver.compactions.DefaultCompactor;
import
org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy;
import
org.apache.hadoop.hbase.regionserver.compactions.RatioBasedCompactionPolicy;
@@ -41,11 +44,11 @@ public class DefaultStoreEngine extends
StoreEngine<DefaultStoreFlusher, RatioBa
DefaultCompactor, DefaultStoreFileManager> {
public static final String DEFAULT_STORE_FLUSHER_CLASS_KEY =
- "hbase.hstore.defaultengine.storeflusher.class";
+ ConfigKey.CLASS("hbase.hstore.defaultengine.storeflusher.class",
StoreFlusher.class);
public static final String DEFAULT_COMPACTOR_CLASS_KEY =
- "hbase.hstore.defaultengine.compactor.class";
+ ConfigKey.CLASS("hbase.hstore.defaultengine.compactor.class",
Compactor.class);
public static final String DEFAULT_COMPACTION_POLICY_CLASS_KEY =
- "hbase.hstore.defaultengine.compactionpolicy.class";
+ ConfigKey.CLASS("hbase.hstore.defaultengine.compactionpolicy.class",
CompactionPolicy.class);
private static final Class<? extends DefaultStoreFlusher>
DEFAULT_STORE_FLUSHER_CLASS =
DefaultStoreFlusher.class;
diff --git
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index 709b38ae926..18b5543ba8d 100644
---
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -125,6 +125,7 @@ import org.apache.hadoop.hbase.client.RowMutations;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
+import org.apache.hadoop.hbase.conf.ConfigKey;
import org.apache.hadoop.hbase.conf.ConfigurationManager;
import org.apache.hadoop.hbase.conf.PropagatingConfigurationObserver;
import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
@@ -238,11 +239,12 @@ public class HRegion implements HeapSize,
PropagatingConfigurationObserver, Regi
public static final String LOAD_CFS_ON_DEMAND_CONFIG_KEY =
"hbase.hregion.scan.loadColumnFamiliesOnDemand";
- public static final String HBASE_MAX_CELL_SIZE_KEY =
"hbase.server.keyvalue.maxsize";
+ public static final String HBASE_MAX_CELL_SIZE_KEY =
+ ConfigKey.LONG("hbase.server.keyvalue.maxsize");
public static final int DEFAULT_MAX_CELL_SIZE = 10485760;
public static final String HBASE_REGIONSERVER_MINIBATCH_SIZE =
- "hbase.regionserver.minibatch.size";
+ ConfigKey.INT("hbase.regionserver.minibatch.size");
public static final int DEFAULT_HBASE_REGIONSERVER_MINIBATCH_SIZE = 20000;
public static final String WAL_HSYNC_CONF_KEY = "hbase.wal.hsync";
@@ -1538,7 +1540,7 @@ public class HRegion implements HeapSize,
PropagatingConfigurationObserver, Regi
public static final boolean DEFAULT_FAIR_REENTRANT_CLOSE_LOCK = true;
/** Conf key for the periodic flush interval */
public static final String MEMSTORE_PERIODIC_FLUSH_INTERVAL =
- "hbase.regionserver.optionalcacheflushinterval";
+ ConfigKey.INT("hbase.regionserver.optionalcacheflushinterval");
/** Default interval for the memstore flush */
public static final int DEFAULT_CACHE_FLUSH_INTERVAL = 3600000;
/** Default interval for System tables memstore flush */
diff --git
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
index 710c9475309..1f37d08f729 100644
---
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
+++
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
@@ -69,6 +69,7 @@ import org.apache.hadoop.hbase.backup.FailedArchiveException;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.conf.ConfigKey;
import org.apache.hadoop.hbase.conf.ConfigurationManager;
import org.apache.hadoop.hbase.conf.PropagatingConfigurationObserver;
import org.apache.hadoop.hbase.coprocessor.ReadOnlyConfiguration;
@@ -129,10 +130,12 @@ import
org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.CompactionDes
@InterfaceAudience.Private
public class HStore
implements Store, HeapSize, StoreConfigInformation,
PropagatingConfigurationObserver {
- public static final String MEMSTORE_CLASS_NAME =
"hbase.regionserver.memstore.class";
+ public static final String MEMSTORE_CLASS_NAME =
+ ConfigKey.CLASS("hbase.regionserver.memstore.class", MemStore.class);
public static final String COMPACTCHECKER_INTERVAL_MULTIPLIER_KEY =
- "hbase.server.compactchecker.interval.multiplier";
- public static final String BLOCKING_STOREFILES_KEY =
"hbase.hstore.blockingStoreFiles";
+ ConfigKey.INT("hbase.server.compactchecker.interval.multiplier");
+ public static final String BLOCKING_STOREFILES_KEY =
+ ConfigKey.INT("hbase.hstore.blockingStoreFiles");
public static final String BLOCK_STORAGE_POLICY_KEY =
"hbase.hstore.block.storage.policy";
// "NONE" is not a valid storage policy and means we defer the policy to HDFS
public static final String DEFAULT_BLOCK_STORAGE_POLICY = "NONE";
diff --git
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreEngine.java
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreEngine.java
index 8d81c90144f..a696032d59f 100644
---
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreEngine.java
+++
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreEngine.java
@@ -39,6 +39,7 @@ import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellComparator;
import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.conf.ConfigKey;
import org.apache.hadoop.hbase.io.hfile.BloomFilterMetrics;
import org.apache.hadoop.hbase.log.HBaseMarkers;
import org.apache.hadoop.hbase.regionserver.compactions.CompactionContext;
@@ -118,7 +119,8 @@ public abstract class StoreEngine<SF extends StoreFlusher,
CP extends Compaction
* The name of the configuration parameter that specifies the class of a
store engine that is used
* to manage and compact HBase store files.
*/
- public static final String STORE_ENGINE_CLASS_KEY =
"hbase.hstore.engine.class";
+ public static final String STORE_ENGINE_CLASS_KEY =
+ ConfigKey.CLASS("hbase.hstore.engine.class", StoreEngine.class);
private static final Class<? extends StoreEngine<?, ?, ?, ?>>
DEFAULT_STORE_ENGINE_CLASS =
DefaultStoreEngine.class;
diff --git
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java
index 451ff93137a..bc67b69b850 100644
---
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java
+++
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java
@@ -36,6 +36,7 @@ import org.apache.hadoop.hbase.KeyValueUtil;
import org.apache.hadoop.hbase.PrivateCellUtil;
import org.apache.hadoop.hbase.client.IsolationLevel;
import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.conf.ConfigKey;
import org.apache.hadoop.hbase.executor.ExecutorService;
import org.apache.hadoop.hbase.filter.Filter;
import org.apache.hadoop.hbase.ipc.RpcCall;
@@ -127,7 +128,7 @@ public class StoreScanner extends
NonReversedNonLazyKeyValueScanner
* timeout checks.
*/
public static final String HBASE_CELLS_SCANNED_PER_HEARTBEAT_CHECK =
- "hbase.cells.scanned.per.heartbeat.check";
+ ConfigKey.LONG("hbase.cells.scanned.per.heartbeat.check");
/**
* Default value of {@link #HBASE_CELLS_SCANNED_PER_HEARTBEAT_CHECK}.
@@ -140,7 +141,8 @@ public class StoreScanner extends
NonReversedNonLazyKeyValueScanner
* block size for this store. If configured with a value <0, for all scans
with ReadType DEFAULT,
* we will open scanner with stream mode itself.
*/
- public static final String STORESCANNER_PREAD_MAX_BYTES =
"hbase.storescanner.pread.max.bytes";
+ public static final String STORESCANNER_PREAD_MAX_BYTES =
+ ConfigKey.LONG("hbase.storescanner.pread.max.bytes");
private final Scan.ReadType readType;
diff --git
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionConfiguration.java
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionConfiguration.java
index 2bc2674268c..26ced48d368 100644
---
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionConfiguration.java
+++
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionConfiguration.java
@@ -22,6 +22,7 @@ import static
org.apache.hadoop.hbase.regionserver.StoreFileWriter.shouldEnableH
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.hadoop.hbase.conf.ConfigKey;
import org.apache.hadoop.hbase.regionserver.StoreConfigInformation;
import org.apache.hadoop.util.StringUtils;
import org.apache.yetus.audience.InterfaceAudience;
@@ -48,43 +49,48 @@ public class CompactionConfiguration {
private static final Logger LOG =
LoggerFactory.getLogger(CompactionConfiguration.class);
- public static final String HBASE_HSTORE_COMPACTION_RATIO_KEY =
"hbase.hstore.compaction.ratio";
+ public static final String HBASE_HSTORE_COMPACTION_RATIO_KEY =
+ ConfigKey.FLOAT("hbase.hstore.compaction.ratio");
public static final String HBASE_HSTORE_COMPACTION_RATIO_OFFPEAK_KEY =
- "hbase.hstore.compaction.ratio.offpeak";
- public static final String HBASE_HSTORE_COMPACTION_MIN_KEY =
"hbase.hstore.compaction.min";
+ ConfigKey.FLOAT("hbase.hstore.compaction.ratio.offpeak");
+ public static final String HBASE_HSTORE_COMPACTION_MIN_KEY =
+ ConfigKey.INT("hbase.hstore.compaction.min");
public static final String HBASE_HSTORE_COMPACTION_MIN_SIZE_KEY =
- "hbase.hstore.compaction.min.size";
- public static final String HBASE_HSTORE_COMPACTION_MAX_KEY =
"hbase.hstore.compaction.max";
+ ConfigKey.LONG("hbase.hstore.compaction.min.size");
+ public static final String HBASE_HSTORE_COMPACTION_MAX_KEY =
+ ConfigKey.INT("hbase.hstore.compaction.max");
public static final String HBASE_HSTORE_COMPACTION_MAX_SIZE_KEY =
- "hbase.hstore.compaction.max.size";
+ ConfigKey.LONG("hbase.hstore.compaction.max.size");
public static final String HBASE_HSTORE_COMPACTION_MAX_SIZE_OFFPEAK_KEY =
- "hbase.hstore.compaction.max.size.offpeak";
- public static final String HBASE_HSTORE_OFFPEAK_END_HOUR =
"hbase.offpeak.end.hour";
- public static final String HBASE_HSTORE_OFFPEAK_START_HOUR =
"hbase.offpeak.start.hour";
+ ConfigKey.LONG("hbase.hstore.compaction.max.size.offpeak");
+ public static final String HBASE_HSTORE_OFFPEAK_END_HOUR =
+ ConfigKey.INT("hbase.offpeak.end.hour");
+ public static final String HBASE_HSTORE_OFFPEAK_START_HOUR =
+ ConfigKey.INT("hbase.offpeak.start.hour");
public static final String HBASE_HSTORE_MIN_LOCALITY_TO_SKIP_MAJOR_COMPACT =
- "hbase.hstore.min.locality.to.skip.major.compact";
+ ConfigKey.FLOAT("hbase.hstore.min.locality.to.skip.major.compact");
public static final String HBASE_HFILE_COMPACTION_DISCHARGER_THREAD_COUNT =
- "hbase.hfile.compaction.discharger.thread.count";
+ ConfigKey.INT("hbase.hfile.compaction.discharger.thread.count");
/*
* The epoch time length for the windows we no longer compact
*/
public static final String DATE_TIERED_MAX_AGE_MILLIS_KEY =
- "hbase.hstore.compaction.date.tiered.max.storefile.age.millis";
+
ConfigKey.LONG("hbase.hstore.compaction.date.tiered.max.storefile.age.millis");
public static final String DATE_TIERED_INCOMING_WINDOW_MIN_KEY =
- "hbase.hstore.compaction.date.tiered.incoming.window.min";
- public static final String
COMPACTION_POLICY_CLASS_FOR_DATE_TIERED_WINDOWS_KEY =
- "hbase.hstore.compaction.date.tiered.window.policy.class";
+ ConfigKey.INT("hbase.hstore.compaction.date.tiered.incoming.window.min");
+ public static final String
COMPACTION_POLICY_CLASS_FOR_DATE_TIERED_WINDOWS_KEY = ConfigKey.CLASS(
+ "hbase.hstore.compaction.date.tiered.window.policy.class",
RatioBasedCompactionPolicy.class);
public static final String
DATE_TIERED_SINGLE_OUTPUT_FOR_MINOR_COMPACTION_KEY =
- "hbase.hstore.compaction.date.tiered.single.output.for.minor.compaction";
+
ConfigKey.BOOLEAN("hbase.hstore.compaction.date.tiered.single.output.for.minor.compaction");
private static final Class<
? extends RatioBasedCompactionPolicy>
DEFAULT_COMPACTION_POLICY_CLASS_FOR_DATE_TIERED_WINDOWS =
ExploringCompactionPolicy.class;
- public static final String DATE_TIERED_COMPACTION_WINDOW_FACTORY_CLASS_KEY =
- "hbase.hstore.compaction.date.tiered.window.factory.class";
+ public static final String DATE_TIERED_COMPACTION_WINDOW_FACTORY_CLASS_KEY =
ConfigKey.CLASS(
+ "hbase.hstore.compaction.date.tiered.window.factory.class",
CompactionWindowFactory.class);
   private static final Class<? extends CompactionWindowFactory>
     DEFAULT_DATE_TIERED_COMPACTION_WINDOW_FACTORY_CLASS =
@@ -93,11 +99,11 @@ public class CompactionConfiguration {
public static final String DATE_TIERED_STORAGE_POLICY_ENABLE_KEY =
"hbase.hstore.compaction.date.tiered.storage.policy.enable";
public static final String DATE_TIERED_HOT_WINDOW_AGE_MILLIS_KEY =
- "hbase.hstore.compaction.date.tiered.hot.window.age.millis";
+    ConfigKey.LONG("hbase.hstore.compaction.date.tiered.hot.window.age.millis");
public static final String DATE_TIERED_HOT_WINDOW_STORAGE_POLICY_KEY =
"hbase.hstore.compaction.date.tiered.hot.window.storage.policy";
public static final String DATE_TIERED_WARM_WINDOW_AGE_MILLIS_KEY =
- "hbase.hstore.compaction.date.tiered.warm.window.age.millis";
+    ConfigKey.LONG("hbase.hstore.compaction.date.tiered.warm.window.age.millis");
public static final String DATE_TIERED_WARM_WINDOW_STORAGE_POLICY_KEY =
"hbase.hstore.compaction.date.tiered.warm.window.storage.policy";
/** Windows older than warm age belong to COLD_WINDOW **/
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/TableDescriptorChecker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/TableDescriptorChecker.java
index 94e2e4bbfa0..36785e5d5a5 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/TableDescriptorChecker.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/TableDescriptorChecker.java
@@ -27,6 +27,7 @@ import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
+import org.apache.hadoop.hbase.conf.ConfigKey;
import org.apache.hadoop.hbase.fs.ErasureCodingUtils;
import org.apache.hadoop.hbase.regionserver.DefaultStoreEngine;
import org.apache.hadoop.hbase.regionserver.HStore;
@@ -79,6 +80,14 @@ public final class TableDescriptorChecker {
   // Setting logs to warning instead of throwing exception if sanityChecks are disabled
boolean logWarn = !shouldSanityCheck(conf);
+ // Check value types
+ warnOrThrowExceptionForFailure(logWarn, () -> ConfigKey.validate(conf));
+ warnOrThrowExceptionForFailure(logWarn, () -> {
+ for (ColumnFamilyDescriptor cfd : td.getColumnFamilies()) {
+        ConfigKey.validate(new CompoundConfiguration().addBytesMap(cfd.getValues()));
+ }
+ });
+
// check max file size
   long maxFileSizeLowerLimit = 2 * 1024 * 1024L; // 2M is the default lower limit
   // if not set MAX_FILESIZE in TableDescriptor, and not set HREGION_MAX_FILESIZE in
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestTableDescriptorChecker.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestTableDescriptorChecker.java
new file mode 100644
index 00000000000..2a8d48783f2
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestTableDescriptorChecker.java
@@ -0,0 +1,86 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.util;
+
+import static org.junit.Assert.fail;
+
+import java.io.IOException;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.DoNotRetryIOException;
+import org.apache.hadoop.hbase.HBaseClassTestRule;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
+import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
+import org.apache.hadoop.hbase.conf.ConfigKey;
+import org.apache.hadoop.hbase.testclassification.MiscTests;
+import org.apache.hadoop.hbase.testclassification.SmallTests;
+import org.junit.ClassRule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+@Category({ MiscTests.class, SmallTests.class })
+public class TestTableDescriptorChecker {
+ @ClassRule
+ public static final HBaseClassTestRule CLASS_RULE =
+ HBaseClassTestRule.forClass(TestTableDescriptorChecker.class);
+
+ @Test
+ public void testSanityCheck() throws IOException {
+ Configuration conf = new Configuration();
+    TableDescriptorBuilder t = TableDescriptorBuilder.newBuilder(TableName.valueOf("test"));
+    ColumnFamilyDescriptorBuilder cf = ColumnFamilyDescriptorBuilder.newBuilder("cf".getBytes());
+ t.setColumnFamily(cf.build());
+
+ // Empty configuration. Should be fine.
+ TableDescriptorChecker.sanityCheck(conf, t.build());
+
+ // Declare configuration type as int.
+ String key = "hbase.hstore.compaction.ratio";
+ ConfigKey.INT(key, v -> v > 0);
+
+ // Error in table configuration.
+ t.setValue(key, "xx");
+ try {
+ TableDescriptorChecker.sanityCheck(conf, t.build());
+ fail("Should have thrown IllegalArgumentException");
+ } catch (DoNotRetryIOException e) {
+ // Expected
+ }
+
+ // Fix the error.
+ t.setValue(key, "1");
+ TableDescriptorChecker.sanityCheck(conf, t.build());
+
+ // Error in column family configuration.
+ cf.setValue(key, "xx");
+ t.removeColumnFamily("cf".getBytes());
+ t.setColumnFamily(cf.build());
+ try {
+ TableDescriptorChecker.sanityCheck(conf, t.build());
+ fail("Should have thrown IllegalArgumentException");
+ } catch (DoNotRetryIOException e) {
+ // Expected
+ }
+
+ // Fix the error.
+ cf.setValue(key, "1");
+ t.removeColumnFamily("cf".getBytes());
+ t.setColumnFamily(cf.build());
+ TableDescriptorChecker.sanityCheck(conf, t.build());
+ }
+}