This is an automated email from the ASF dual-hosted git repository.
asdf2014 pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/druid.git
The following commit(s) were added to refs/heads/master by this push:
new 646fa84 Support unit on byte-related properties (#10203)
646fa84 is described below
commit 646fa84d043a075e64efa0d65205a17b066204e9
Author: frank chen <[email protected]>
AuthorDate: Fri Jul 31 09:58:48 2020 +0800
Support unit on byte-related properties (#10203)
* support unit suffix on byte-related properties
* add doc
* change default value of byte-related properties in example files
* fix coding style
* fix doc
* fix CI
* suppress spelling errors
* improve code according to comments
* rename Bytes to HumanReadableBytes
* add getBytesInInt to get value safely
* improve doc
* fix problem reported by CI
* fix problem reported by CI
* resolve code review comments
* improve error message
* improve code & doc according to comments
* fix CI problem
* improve doc
* suppress spelling check errors
---
.../druid/java/util/common/HumanReadableBytes.java | 211 +++++++++++
.../java/util/common/HumanReadableBytesRange.java | 96 +++++
.../util/common/HumanReadableBytesSerializer.java | 57 +++
.../java/util/common/HumanReadableBytesTest.java | 398 +++++++++++++++++++++
docs/configuration/human-readable-byte.md | 87 +++++
docs/configuration/index.md | 16 +-
.../cluster/data/historical/runtime.properties | 8 +-
.../cluster/data/middleManager/runtime.properties | 2 +-
.../druid/cluster/query/broker/runtime.properties | 4 +-
.../single-server/large/broker/runtime.properties | 4 +-
.../large/historical/runtime.properties | 8 +-
.../large/middleManager/runtime.properties | 2 +-
.../single-server/medium/broker/runtime.properties | 4 +-
.../medium/historical/runtime.properties | 8 +-
.../medium/middleManager/runtime.properties | 2 +-
.../micro-quickstart/broker/runtime.properties | 4 +-
.../micro-quickstart/historical/runtime.properties | 8 +-
.../middleManager/runtime.properties | 2 +-
.../nano-quickstart/broker/runtime.properties | 4 +-
.../nano-quickstart/historical/runtime.properties | 8 +-
.../middleManager/runtime.properties | 2 +-
.../single-server/small/broker/runtime.properties | 4 +-
.../small/historical/runtime.properties | 8 +-
.../small/middleManager/runtime.properties | 2 +-
.../single-server/xlarge/broker/runtime.properties | 4 +-
.../xlarge/historical/runtime.properties | 8 +-
.../xlarge/middleManager/runtime.properties | 2 +-
.../overlord/config/RemoteTaskRunnerConfig.java | 13 +-
.../apache/druid/query/DruidProcessingConfig.java | 7 +-
.../org/apache/druid/client/DruidServerConfig.java | 9 +-
.../druid/client/cache/CaffeineCacheConfig.java | 5 +-
.../druid/guice/http/DruidHttpClientConfig.java | 5 +-
.../segment/loading/StorageLocationConfig.java | 14 +-
.../druid/server/initialization/ServerConfig.java | 13 +-
website/.spelling | 20 ++
35 files changed, 968 insertions(+), 81 deletions(-)
diff --git
a/core/src/main/java/org/apache/druid/java/util/common/HumanReadableBytes.java
b/core/src/main/java/org/apache/druid/java/util/common/HumanReadableBytes.java
new file mode 100644
index 0000000..9298318
--- /dev/null
+++
b/core/src/main/java/org/apache/druid/java/util/common/HumanReadableBytes.java
@@ -0,0 +1,211 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.druid.java.util.common;
+
+import com.fasterxml.jackson.databind.annotation.JsonSerialize;
+
+@JsonSerialize(using = HumanReadableBytesSerializer.class)
+public class HumanReadableBytes
+{
+ public static final HumanReadableBytes ZERO = new HumanReadableBytes(0L);
+
+ private final long bytes;
+
+ public HumanReadableBytes(String bytes)
+ {
+ this.bytes = HumanReadableBytes.parse(bytes);
+ }
+
+ public HumanReadableBytes(long bytes)
+ {
+ this.bytes = bytes;
+ }
+
+ public long getBytes()
+ {
+ return bytes;
+ }
+
+ public int getBytesInInt()
+ {
+ if (bytes > Integer.MAX_VALUE) {
+ throw new ISE("Number overflow");
+ }
+
+ return (int) bytes;
+ }
+
+ @Override
+ public boolean equals(Object thatObj)
+ {
+ if (thatObj == null) {
+ return false;
+ }
+ if (thatObj instanceof HumanReadableBytes) {
+ return bytes == ((HumanReadableBytes) thatObj).bytes;
+ } else {
+ return false;
+ }
+ }
+
+ @Override
+ public int hashCode()
+ {
+ return Long.hashCode(bytes);
+ }
+
+ @Override
+ public String toString()
+ {
+ return String.valueOf(bytes);
+ }
+
+ public static HumanReadableBytes valueOf(int bytes)
+ {
+ return new HumanReadableBytes(bytes);
+ }
+
+ public static HumanReadableBytes valueOf(long bytes)
+ {
+ return new HumanReadableBytes(bytes);
+ }
+
+ public static long parse(String number)
+ {
+ if (number == null) {
+ throw new IAE("Invalid format of number: number is null");
+ }
+
+ number = number.trim();
+ if (number.length() == 0) {
+ throw new IAE("Invalid format of number: number is blank");
+ }
+
+ return parseInner(number);
+ }
+
+ /**
+ * parse the case-insensitive string number, which is either:
+ * <p>
+ * a number string
+ * <p>
+ * or
+ * <p>
+ * a number string with a suffix which indicates the unit of the number
+ * the unit must be one of following
+ * k - kilobyte = 1000
+ * m - megabyte = 1,000,000
+ * g - gigabyte = 1,000,000,000
+ * t - terabyte = 1,000,000,000,000
+ * p - petabyte = 1,000,000,000,000,000
+ * KiB - kilo binary byte = 1024
+ * MiB - mega binary byte = 1024*1024
+ * GiB - giga binary byte = 1024*1024*1024
+ * TiB - tera binary byte = 1024*1024*1024*1024
+ * PiB - peta binary byte = 1024*1024*1024*1024*1024
+ * <p>
+ *
+ * @param nullValue to be returned when given number is null or empty
+ * @return nullValue if input is null or empty
+ * value of number
+ * @throws IAE if the input is invalid
+ */
+ public static long parse(String number, long nullValue)
+ {
+ if (number == null) {
+ return nullValue;
+ }
+
+ number = number.trim();
+ if (number.length() == 0) {
+ return nullValue;
+ }
+ return parseInner(number);
+ }
+
+ private static long parseInner(String rawNumber)
+ {
+ String number = StringUtils.toLowerCase(rawNumber);
+ if (number.charAt(0) == '-') {
+ throw new IAE("Invalid format of number: %s. Negative value is not
allowed.", rawNumber);
+ }
+
+ int lastDigitIndex = number.length() - 1;
+ boolean isBinaryByte = false;
+ char unit = number.charAt(lastDigitIndex--);
+ if (unit == 'b') {
+ //unit ends with 'b' must be format of KiB/MiB/GiB/TiB/PiB, so at least
3 extra characters are required
+ if (lastDigitIndex < 2) {
+ throw new IAE("Invalid format of number: %s", rawNumber);
+ }
+ if (number.charAt(lastDigitIndex--) != 'i') {
+ throw new IAE("Invalid format of number: %s", rawNumber);
+ }
+
+ unit = number.charAt(lastDigitIndex--);
+ isBinaryByte = true;
+ }
+
+ long base = 1;
+ switch (unit) {
+ case 'k':
+ base = isBinaryByte ? 1024 : 1_000;
+ break;
+
+ case 'm':
+ base = isBinaryByte ? 1024 * 1024 : 1_000_000;
+ break;
+
+ case 'g':
+ base = isBinaryByte ? 1024 * 1024 * 1024 : 1_000_000_000;
+ break;
+
+ case 't':
+ base = isBinaryByte ? 1024L * 1024 * 1024 * 1024 : 1_000_000_000_000L;
+ break;
+
+ case 'p':
+ base = isBinaryByte ? 1024L * 1024 * 1024 * 1024 * 1024 :
1_000_000_000_000_000L;
+ break;
+
+ default:
+ if (!Character.isDigit(unit)) {
+ throw new IAE("Invalid format of number: %s", rawNumber);
+ }
+
+ //lastDigitIndex here holds the index which is prior to current digit
+ //move backward so that it's at the right place
+ lastDigitIndex++;
+ break;
+ }
+
+ try {
+ long value = Long.parseLong(number.substring(0, lastDigitIndex + 1)) *
base;
+ if (base > 1 && value < base) {
+ //for base == 1, overflow has been checked in parseLong
+ throw new IAE("Number overflow: %s", rawNumber);
+ }
+ return value;
+ }
+ catch (NumberFormatException e) {
+ throw new IAE("Invalid format or out of range of long: %s", rawNumber);
+ }
+ }
+}
diff --git
a/core/src/main/java/org/apache/druid/java/util/common/HumanReadableBytesRange.java
b/core/src/main/java/org/apache/druid/java/util/common/HumanReadableBytesRange.java
new file mode 100644
index 0000000..7e5a896
--- /dev/null
+++
b/core/src/main/java/org/apache/druid/java/util/common/HumanReadableBytesRange.java
@@ -0,0 +1,96 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.druid.java.util.common;
+
+import javax.validation.Constraint;
+import javax.validation.ConstraintValidator;
+import javax.validation.ConstraintValidatorContext;
+import javax.validation.Payload;
+import java.lang.annotation.Documented;
+import java.lang.annotation.ElementType;
+import java.lang.annotation.Retention;
+import java.lang.annotation.RetentionPolicy;
+import java.lang.annotation.Target;
+
+/**
+ * This annotation is an extension of java validation framework to ensure the
validity of value of {@link HumanReadableBytes}.
+ *
+ * To use it, put it on a field of type of {@link HumanReadableBytes}.
+ * For example,
+ *
+ * <code>
+ * class Size {
+ * @HumanReadableBytesRange( min = 5, max = 1024 )
+ * private HumanReadableBytes size;
+ * }
+ * </code>
+ *
+ *
+ */
+@Target({ElementType.METHOD, ElementType.FIELD, ElementType.ANNOTATION_TYPE,
ElementType.CONSTRUCTOR, ElementType.PARAMETER})
+@Retention(RetentionPolicy.RUNTIME)
+@Documented
+@Constraint(validatedBy =
HumanReadableBytesRange.HumanReadableBytesRangeValidator.class)
+public @interface HumanReadableBytesRange
+{
+ /**
+ * lower bound of {@link HumanReadableBytes}. Inclusive
+ */
+ long min() default 0;
+
+ /**
+ * upper bound of {@link HumanReadableBytes}. Inclusive
+ */
+ long max() default Long.MAX_VALUE;
+
+ //ConstraintValidator requires
+ Class<?>[] groups() default {};
+
+ //ConstraintValidator requires
+ String message() default "value is out of range";
+
+ //ConstraintValidator requires
+ Class<? extends Payload>[] payload() default {};
+
+ class HumanReadableBytesRangeValidator implements
ConstraintValidator<HumanReadableBytesRange, Object>
+ {
+ private HumanReadableBytesRange range;
+
+ @Override
+ public void initialize(HumanReadableBytesRange range)
+ {
+ this.range = range;
+ }
+
+ @Override
+ public boolean isValid(Object value, ConstraintValidatorContext context)
+ {
+ if (value == null) {
+ return true;
+ }
+ if (value instanceof HumanReadableBytes) {
+ long bytes = ((HumanReadableBytes) value).getBytes();
+ return bytes >= range.min() &&
+ bytes <= range.max();
+ }
+ return true;
+ }
+ }
+}
diff --git
a/core/src/main/java/org/apache/druid/java/util/common/HumanReadableBytesSerializer.java
b/core/src/main/java/org/apache/druid/java/util/common/HumanReadableBytesSerializer.java
new file mode 100644
index 0000000..8864b1a
--- /dev/null
+++
b/core/src/main/java/org/apache/druid/java/util/common/HumanReadableBytesSerializer.java
@@ -0,0 +1,57 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.druid.java.util.common;
+
+import com.fasterxml.jackson.core.JsonGenerator;
+import com.fasterxml.jackson.databind.JsonSerializer;
+import com.fasterxml.jackson.databind.SerializerProvider;
+
+import java.io.IOException;
+
+/**
+ *
+ * This serializer provides the basic support of JSON serialization of {@link
HumanReadableBytes}
+ * to demonstrate serialization and deserialization work
+ *
+ * Given
+ * <p>
+ * HumanReadableBytes bytes = new HumanReadableBytes("1K");
+ * </p>
+ * will be serialized as 1000 instead of the raw input of 1K
+ *
+ *
+ */
+public class HumanReadableBytesSerializer extends
JsonSerializer<HumanReadableBytes>
+{
+ @Override
+ public void serialize(
+ HumanReadableBytes value,
+ JsonGenerator jgen,
+ SerializerProvider provider) throws IOException
+ {
+ jgen.writeNumber(value.getBytes());
+ }
+
+ @Override
+ public Class<HumanReadableBytes> handledType()
+ {
+ return HumanReadableBytes.class;
+ }
+}
diff --git
a/core/src/test/java/org/apache/druid/java/util/common/HumanReadableBytesTest.java
b/core/src/test/java/org/apache/druid/java/util/common/HumanReadableBytesTest.java
new file mode 100644
index 0000000..e4364e4
--- /dev/null
+++
b/core/src/test/java/org/apache/druid/java/util/common/HumanReadableBytesTest.java
@@ -0,0 +1,398 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.druid.java.util.common;
+
+import com.fasterxml.jackson.core.JsonProcessingException;
+import com.fasterxml.jackson.databind.ObjectMapper;
+import org.hamcrest.Description;
+import org.hamcrest.Matcher;
+import org.junit.Assert;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.ExpectedException;
+
+import javax.validation.ConstraintViolation;
+import javax.validation.Validation;
+import javax.validation.Validator;
+import javax.validation.groups.Default;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Set;
+
+public class HumanReadableBytesTest
+{
+ @Rule
+ public ExpectedException expectedException = ExpectedException.none();
+
+ @Test
+ public void testNumberString()
+ {
+ Assert.assertEquals(0, HumanReadableBytes.parse("0"));
+ Assert.assertEquals(1, HumanReadableBytes.parse("1"));
+ Assert.assertEquals(10000000, HumanReadableBytes.parse("10000000"));
+ }
+
+ @Test
+ public void testWithWhiteSpace()
+ {
+ Assert.assertEquals(12345, HumanReadableBytes.parse(" 12345 "));
+ Assert.assertEquals(12345, HumanReadableBytes.parse("\t12345\t"));
+ }
+
+ @Test
+ public void testK()
+ {
+ Assert.assertEquals(1000, HumanReadableBytes.parse("1k"));
+ Assert.assertEquals(1000, HumanReadableBytes.parse("1K"));
+ }
+
+ @Test
+ public void testM()
+ {
+ Assert.assertEquals(1000_000, HumanReadableBytes.parse("1m"));
+ Assert.assertEquals(1000_000, HumanReadableBytes.parse("1M"));
+ }
+
+ @Test
+ public void testG()
+ {
+ Assert.assertEquals(1000_000_000, HumanReadableBytes.parse("1g"));
+ Assert.assertEquals(1000_000_000, HumanReadableBytes.parse("1G"));
+ }
+
+ @Test
+ public void testT()
+ {
+ Assert.assertEquals(1000_000_000_000L, HumanReadableBytes.parse("1t"));
+ Assert.assertEquals(1000_000_000_000L, HumanReadableBytes.parse("1T"));
+ }
+
+ @Test
+ public void testKiB()
+ {
+ Assert.assertEquals(1024, HumanReadableBytes.parse("1kib"));
+ Assert.assertEquals(9 * 1024, HumanReadableBytes.parse("9KiB"));
+ Assert.assertEquals(9 * 1024, HumanReadableBytes.parse("9Kib"));
+ }
+
+ @Test
+ public void testMiB()
+ {
+ Assert.assertEquals(1024 * 1024, HumanReadableBytes.parse("1mib"));
+ Assert.assertEquals(9 * 1024 * 1024, HumanReadableBytes.parse("9MiB"));
+ Assert.assertEquals(9 * 1024 * 1024, HumanReadableBytes.parse("9Mib"));
+ }
+
+ @Test
+ public void testGiB()
+ {
+ Assert.assertEquals(1024 * 1024 * 1024, HumanReadableBytes.parse("1gib"));
+ Assert.assertEquals(1024 * 1024 * 1024, HumanReadableBytes.parse("1GiB"));
+ Assert.assertEquals(9L * 1024 * 1024 * 1024,
HumanReadableBytes.parse("9Gib"));
+ }
+
+ @Test
+ public void testTiB()
+ {
+ Assert.assertEquals(1024L * 1024 * 1024 * 1024,
HumanReadableBytes.parse("1tib"));
+ Assert.assertEquals(9L * 1024 * 1024 * 1024 * 1024,
HumanReadableBytes.parse("9TiB"));
+ Assert.assertEquals(9L * 1024 * 1024 * 1024 * 1024,
HumanReadableBytes.parse("9Tib"));
+ }
+
+ @Test
+ public void testPiB()
+ {
+ Assert.assertEquals(1024L * 1024 * 1024 * 1024 * 1024,
HumanReadableBytes.parse("1pib"));
+ Assert.assertEquals(9L * 1024 * 1024 * 1024 * 1024 * 1024,
HumanReadableBytes.parse("9PiB"));
+ Assert.assertEquals(9L * 1024 * 1024 * 1024 * 1024 * 1024,
HumanReadableBytes.parse("9Pib"));
+ }
+
+ @Test
+ public void testDefault()
+ {
+ Assert.assertEquals(-123, HumanReadableBytes.parse(" ", -123));
+ Assert.assertEquals(-456, HumanReadableBytes.parse(null, -456));
+ Assert.assertEquals(-789, HumanReadableBytes.parse("\t", -789));
+ }
+
+ static class ExceptionMatcher implements Matcher
+ {
+ static ExceptionMatcher INVALIDFORMAT = new ExceptionMatcher("Invalid
format");
+ static ExceptionMatcher OVERFLOW = new ExceptionMatcher("Number overflow");
+
+ private String prefix;
+
+ public ExceptionMatcher(String prefix)
+ {
+ this.prefix = prefix;
+ }
+
+ @Override
+ public boolean matches(Object item)
+ {
+ if (!(item instanceof IAE)) {
+ return false;
+ }
+
+ return ((IAE) item).getMessage().startsWith(prefix);
+ }
+
+ @Override
+ public void describeMismatch(Object item, Description mismatchDescription)
+ {
+ }
+
+ @Override
+ public void _dont_implement_Matcher___instead_extend_BaseMatcher_()
+ {
+ }
+
+ @Override
+ public void describeTo(Description description)
+ {
+ }
+ }
+
+ @Test
+ public void testNull()
+ {
+ expectedException.expect(ExceptionMatcher.INVALIDFORMAT);
+ HumanReadableBytes.parse(null);
+ }
+
+ @Test
+ public void testEmpty()
+ {
+ expectedException.expect(ExceptionMatcher.INVALIDFORMAT);
+ HumanReadableBytes.parse("");
+ }
+
+ @Test
+ public void testWhitespace()
+ {
+ expectedException.expect(ExceptionMatcher.INVALIDFORMAT);
+ HumanReadableBytes.parse(" ");
+ }
+
+ @Test
+ public void testNegative()
+ {
+ expectedException.expect(ExceptionMatcher.INVALIDFORMAT);
+ HumanReadableBytes.parse("-1");
+ }
+
+ @Test
+ public void testInvalidFormatOneChar()
+ {
+ expectedException.expect(ExceptionMatcher.INVALIDFORMAT);
+ HumanReadableBytes.parse("b");
+ }
+
+ @Test
+ public void testInvalidFormatOneChar2()
+ {
+ expectedException.expect(ExceptionMatcher.INVALIDFORMAT);
+ HumanReadableBytes.parse("B");
+ }
+
+ @Test
+ public void testInvalidFormatExtraSpace()
+ {
+ expectedException.expect(ExceptionMatcher.INVALIDFORMAT);
+ HumanReadableBytes.parse("1 b");
+ }
+
+ @Test
+ public void testInvalidFormat4()
+ {
+ expectedException.expect(ExceptionMatcher.INVALIDFORMAT);
+ HumanReadableBytes.parse("1b");
+ }
+
+ @Test
+ public void testInvalidFormatMiBExtraSpace()
+ {
+ expectedException.expect(ExceptionMatcher.INVALIDFORMAT);
+ HumanReadableBytes.parse("1 mib");
+ }
+
+ @Test
+ public void testInvalidFormatTiB()
+ {
+ expectedException.expect(ExceptionMatcher.INVALIDFORMAT);
+ HumanReadableBytes.parse("tib");
+ }
+
+ @Test
+ public void testInvalidFormatGiB()
+ {
+ expectedException.expect(ExceptionMatcher.INVALIDFORMAT);
+ HumanReadableBytes.parse("gib");
+ }
+
+ @Test
+ public void testInvalidFormatPiB()
+ {
+ expectedException.expect(ExceptionMatcher.INVALIDFORMAT);
+ HumanReadableBytes.parse(" pib");
+ }
+
+ @Test
+ public void testInvalidCharacter()
+ {
+ expectedException.expect(ExceptionMatcher.INVALIDFORMAT);
+ HumanReadableBytes.parse("c");
+ }
+
+ @Test
+ public void testExtraLargeNumber()
+ {
+ expectedException.expect(ExceptionMatcher.INVALIDFORMAT);
+ String extraLarge = Long.MAX_VALUE + "1";
+ HumanReadableBytes.parse(extraLarge);
+ }
+
+ @Test
+ public void testOverflowK()
+ {
+ expectedException.expect(ExceptionMatcher.OVERFLOW);
+ String overflow = (Long.MAX_VALUE / 1000 + 1) + "k";
+ HumanReadableBytes.parse(overflow);
+ }
+
+ @Test
+ public void testOverflowM()
+ {
+ expectedException.expect(ExceptionMatcher.OVERFLOW);
+ String overflow = (Long.MAX_VALUE / 1000_000 + 1) + "m";
+ HumanReadableBytes.parse(overflow);
+ }
+
+ @Test
+ public void testOverflowG()
+ {
+ expectedException.expect(ExceptionMatcher.OVERFLOW);
+ String overflow = (Long.MAX_VALUE / 1000_000_000L + 1) + "g";
+ HumanReadableBytes.parse(overflow);
+ }
+
+ @Test
+ public void testOverflowT()
+ {
+ expectedException.expect(ExceptionMatcher.OVERFLOW);
+ String overflow = (Long.MAX_VALUE / 1000_000_000_000L + 1) + "t";
+ HumanReadableBytes.parse(overflow);
+ }
+
+ @Test
+ public void testOverflowP()
+ {
+ expectedException.expect(ExceptionMatcher.OVERFLOW);
+ String overflow = (Long.MAX_VALUE / 1_000_000_000_000_000L + 1) + "p";
+ HumanReadableBytes.parse(overflow);
+ }
+
+ @Test
+ public void testOverflowKiB()
+ {
+ expectedException.expect(ExceptionMatcher.OVERFLOW);
+ String overflow = (Long.MAX_VALUE / 1024 + 1) + "KiB";
+ HumanReadableBytes.parse(overflow);
+ }
+
+ @Test
+ public void testOverflowMiB()
+ {
+ expectedException.expect(ExceptionMatcher.OVERFLOW);
+ String overflow = (Long.MAX_VALUE / (1024 * 1024) + 1) + "MiB";
+ HumanReadableBytes.parse(overflow);
+ }
+
+ @Test
+ public void testOverflowGiB()
+ {
+ expectedException.expect(ExceptionMatcher.OVERFLOW);
+ String overflow = (Long.MAX_VALUE / (1024L * 1024 * 1024) + 1) + "GiB";
+ HumanReadableBytes.parse(overflow);
+ }
+
+ @Test
+ public void testOverflowTiB()
+ {
+ expectedException.expect(ExceptionMatcher.OVERFLOW);
+ String overflow = (Long.MAX_VALUE / (1024L * 1024 * 1024 * 1024) + 1) +
"TiB";
+ HumanReadableBytes.parse(overflow);
+ }
+
+ @Test
+ public void testOverflowPiB()
+ {
+ expectedException.expect(ExceptionMatcher.OVERFLOW);
+ String overflow = (Long.MAX_VALUE / (1024L * 1024 * 1024 * 1024 * 1024) +
1) + "PiB";
+ HumanReadableBytes.parse(overflow);
+ }
+
+ @Test
+ public void testJSON() throws JsonProcessingException
+ {
+ ObjectMapper mapper = new ObjectMapper();
+ HumanReadableBytes bytes = new HumanReadableBytes("5m");
+ String serialized = mapper.writeValueAsString(bytes);
+ HumanReadableBytes deserialized = mapper.readValue(serialized,
HumanReadableBytes.class);
+ Assert.assertEquals(bytes, deserialized);
+ }
+
+ static class TestBytesRange
+ {
+ @HumanReadableBytesRange(min = 0, max = 5)
+ HumanReadableBytes bytes;
+
+ public TestBytesRange(HumanReadableBytes bytes)
+ {
+ this.bytes = bytes;
+ }
+ }
+
+ @Test
+ public void testBytesRange()
+ {
+ long errorCount = validate(new
TestBytesRange(HumanReadableBytes.valueOf(-1)));
+ Assert.assertEquals(1, errorCount);
+
+ errorCount = validate(new TestBytesRange(HumanReadableBytes.valueOf(0)));
+ Assert.assertEquals(0, errorCount);
+
+ errorCount = validate(new TestBytesRange(HumanReadableBytes.valueOf(5)));
+ Assert.assertEquals(0, errorCount);
+
+ errorCount = validate(new TestBytesRange(HumanReadableBytes.valueOf(6)));
+ Assert.assertEquals(1, errorCount);
+ }
+
+ private static <T> long validate(T obj)
+ {
+ Validator validator = Validation.buildDefaultValidatorFactory()
+ .getValidator();
+
+ Map<String, StringBuilder> errorMap = new HashMap<>();
+ Set<ConstraintViolation<T>> set = validator.validate(obj, Default.class);
+ return set == null ? 0 : set.size();
+ }
+}
diff --git a/docs/configuration/human-readable-byte.md
b/docs/configuration/human-readable-byte.md
new file mode 100644
index 0000000..7ee3999
--- /dev/null
+++ b/docs/configuration/human-readable-byte.md
@@ -0,0 +1,87 @@
+---
+id: human-readable-byte
+title: "Human-readable Byte Configuration Reference"
+---
+
+<!--
+ ~ Licensed to the Apache Software Foundation (ASF) under one
+ ~ or more contributor license agreements. See the NOTICE file
+ ~ distributed with this work for additional information
+ ~ regarding copyright ownership. The ASF licenses this file
+ ~ to you under the Apache License, Version 2.0 (the
+ ~ "License"); you may not use this file except in compliance
+ ~ with the License. You may obtain a copy of the License at
+ ~
+ ~ http://www.apache.org/licenses/LICENSE-2.0
+ ~
+ ~ Unless required by applicable law or agreed to in writing,
+ ~ software distributed under the License is distributed on an
+ ~ "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ ~ KIND, either express or implied. See the License for the
+ ~ specific language governing permissions and limitations
+ ~ under the License.
+ -->
+
+
+This page documents configuration properties related to bytes.
+
+These properties can be configured through 2 ways:
+1. a simple number in bytes
+2. a number with a unit suffix
+
+## A number in bytes
+
+Given that cache size is 3G, there's a configuration as below
+
+```properties
+# 3G bytes = 3_000_000_000 bytes
+druid.cache.sizeInBytes=3000000000
+```
+
+
+## A number with a unit suffix
+
+When you have to put a large number for some configuration as above, it is
easy to make a mistake such as extra or missing 0s. Druid supports a better
way, a number with a unit suffix.
+
+Given a disk of 1T, the configuration can be
+
+```properties
+druid.segmentCache.locations=[{"path":"/segment-cache","maxSize":"1t"}]
+```
+
+Note: in above example, both `1t` and `1T` are acceptable since it's
case-insensitive.
+
+### Supported Units
+In the world of computers, a unit like `K` is ambiguous. It means 1000 or 1024
depending on the context; for more information please see
[here](https://en.wikipedia.org/wiki/Binary_prefix).
+
+To make it clear, the base of each unit is defined in Druid as below
+
+| Unit | Description | Base |
+|---|---|---|
+| K | Kilo Decimal Byte | 1_000 |
+| M | Mega Decimal Byte | 1_000_000 |
+| G | Giga Decimal Byte | 1_000_000_000 |
+| T | Tera Decimal Byte | 1_000_000_000_000 |
+| P | Peta Decimal Byte | 1_000_000_000_000_000 |
+| KiB | Kilo Binary Byte | 1024 |
+| MiB | Mega Binary Byte | 1024 * 1024 |
+| GiB | Giga Binary Byte | 1024 * 1024 * 1024 |
+| TiB | Tera Binary Byte | 1024 * 1024 * 1024 * 1024 |
+| PiB | Peta Binary Byte | 1024 * 1024 * 1024 * 1024 * 1024 |
+
+Unit is case-insensitive. `k`, `kib`, `KiB`, `kiB` are all acceptable.
+
+Here are two examples
+
+```properties
+# 1G bytes = 1_000_000_000 bytes
+druid.cache.sizeInBytes=1g
+```
+
+```properties
+# 256MiB bytes = 256 * 1024 * 1024 bytes
+druid.cache.sizeInBytes=256MiB
+```
+
+
+
diff --git a/docs/configuration/index.md b/docs/configuration/index.md
index 01480be..8e363fd 100644
--- a/docs/configuration/index.md
+++ b/docs/configuration/index.md
@@ -879,7 +879,7 @@ The following configs only apply if the Overlord is running
in remote mode. For
|`druid.indexer.runner.taskAssignmentTimeout`|How long to wait after a task as
been assigned to a MiddleManager before throwing an error.|PT5M|
|`druid.indexer.runner.minWorkerVersion`|The minimum MiddleManager version to
send tasks to. |"0"|
|`druid.indexer.runner.compressZnodes`|Indicates whether or not the Overlord
should expect MiddleManagers to compress Znodes.|true|
-|`druid.indexer.runner.maxZnodeBytes`|The maximum size Znode in bytes that can
be created in Zookeeper.|524288|
+|`druid.indexer.runner.maxZnodeBytes`|The maximum size Znode in bytes that can
be created in Zookeeper. Human-readable format is supported, see
[here](human-readable-byte.md). | 512 KiB |
|`druid.indexer.runner.taskCleanupTimeout`|How long to wait before failing a
task after a MiddleManager is disconnected from Zookeeper.|PT15M|
|`druid.indexer.runner.taskShutdownLinkTimeout`|How long to wait on a shutdown
request to a MiddleManager before timing out|PT1M|
|`druid.indexer.runner.pendingTasksRunnerNumThreads`|Number of threads to
allocate pending-tasks to workers, must be at least 1.|1|
@@ -1177,7 +1177,7 @@ Processing properties set on the Middlemanager will be
passed through to Peons.
|Property|Description|Default|
|--------|-----------|-------|
-|`druid.processing.buffer.sizeBytes`|This specifies a buffer size for the
storage of intermediate results. The computation engine in both the Historical
and Realtime processes will use a scratch buffer of this size to do all of
their intermediate computations off-heap. Larger values allow for more
aggregations in a single pass over the data while smaller values can require
more passes depending on the query that is being executed.|auto (max 1GB)|
+|`druid.processing.buffer.sizeBytes`|This specifies a buffer size for the
storage of intermediate results. The computation engine in both the Historical
and Realtime processes will use a scratch buffer of this size to do all of
their intermediate computations off-heap. Larger values allow for more
aggregations in a single pass over the data while smaller values can require
more passes depending on the query that is being executed. Human-readable
format is supported, see [here](human-read [...]
|`druid.processing.buffer.poolCacheMaxCount`|processing buffer pool caches the
buffers for later use, this is the maximum count cache will grow to. note that
pool can create more buffers than it can cache if necessary.|Integer.MAX_VALUE|
|`druid.processing.formatString`|Realtime and Historical processes use this
format string to name their processing threads.|processing-%s|
|`druid.processing.numMergeBuffers`|The number of direct memory buffers
available for merging query results. The buffers are sized by
`druid.processing.buffer.sizeBytes`. This property is effectively a concurrency
limit for queries that require merging buffers. If you are using any queries
that require merge buffers (currently, just groupBy v2) then you should have at
least two of these.|`max(2, druid.processing.numThreads / 4)`|
@@ -1370,7 +1370,7 @@ These Historical configurations can be defined in the
`historical/runtime.proper
|Property|Description|Default|
|--------|-----------|-------|
-|`druid.server.maxSize`|The maximum number of bytes-worth of segments that the
process wants assigned to it. The Coordinator process will attempt to assign
segments to a Historical process only if this property is greater than the
total size of segments served by it. Since this property defines the upper
limit on the total segment size that can be assigned to a Historical, it can be
set to the sum of all `maxSize` values specified within
`druid.segmentCache.locations` property.|0|
+|`druid.server.maxSize`|The maximum number of bytes-worth of segments that the
process wants assigned to it. The Coordinator process will attempt to assign
segments to a Historical process only if this property is greater than the
total size of segments served by it. Since this property defines the upper
limit on the total segment size that can be assigned to a Historical, it can be
set to the sum of all `maxSize` values specified within
`druid.segmentCache.locations` property. Human-rea [...]
|`druid.server.tier`| A string to name the distribution tier that the storage
process belongs to. Many of the [rules Coordinator processes
use](../operations/rule-configuration.md) to manage segments can be keyed on
tiers. | `_default_tier` |
|`druid.server.priority`|In a tiered architecture, the priority of the tier,
thus allowing control over which processes are queried. Higher numbers mean
higher priority. The default (no priority) works for architecture with no cross
replication (tiers that have no data-storage overlap). Data centers typically
have equal priority. | 0 |
@@ -1378,7 +1378,7 @@ These Historical configurations can be defined in the
`historical/runtime.proper
|Property|Description|Default|
|--------|-----------|-------|
-|`druid.segmentCache.locations`|Segments assigned to a Historical process are
first stored on the local file system (in a disk cache) and then served by the
Historical process. These locations define where that local cache resides. This
value cannot be NULL or EMPTY. Here is an example
`druid.segmentCache.locations=[{"path": "/mnt/druidSegments", "maxSize": 10000,
"freeSpacePercent": 1.0}]`. "freeSpacePercent" is optional, if provided then
enforces that much of free disk partition space [...]
+|`druid.segmentCache.locations`|Segments assigned to a Historical process are
first stored on the local file system (in a disk cache) and then served by the
Historical process. These locations define where that local cache resides. This
value cannot be NULL or EMPTY. Here is an example
`druid.segmentCache.locations=[{"path": "/mnt/druidSegments", "maxSize": "10k",
"freeSpacePercent": 1.0}]`. "freeSpacePercent" is optional, if provided then
enforces that much of free disk partition space [...]
|`druid.segmentCache.locationSelectorStrategy`|The strategy used to select a
location from the configured `druid.segmentCache.locations` for segment
distribution. Possible values are `leastBytesUsed`, `roundRobin`, `random`, or
`mostAvailableSize`. |leastBytesUsed|
|`druid.segmentCache.deleteOnRemove`|Delete segment files from cache once a
process is no longer serving a segment.|true|
|`druid.segmentCache.dropSegmentDelayMillis`|How long a process delays before
completely dropping segment.|30000 (30 seconds)|
@@ -1552,7 +1552,7 @@ Druid uses Jetty to serve HTTP requests. Each query being
processed consumes a s
|`druid.server.http.maxIdleTime`|The Jetty max idle time for a
connection.|PT5M|
|`druid.server.http.enableRequestLimit`|If enabled, no requests would be
queued in jetty queue and "HTTP 429 Too Many Requests" error response would be
sent. |false|
|`druid.server.http.defaultQueryTimeout`|Query timeout in millis, beyond which
unfinished queries will be cancelled|300000|
-|`druid.server.http.maxScatterGatherBytes`|Maximum number of bytes gathered
from data processes such as Historicals and realtime processes to execute a
query. Queries that exceed this limit will fail. This is an advance
configuration that allows to protect in case Broker is under heavy load and not
utilizing the data gathered in memory fast enough and leading to OOMs. This
limit can be further reduced at query time using `maxScatterGatherBytes` in the
context. Note that having large limi [...]
+|`druid.server.http.maxScatterGatherBytes`|Maximum number of bytes gathered
from data processes such as Historicals and realtime processes to execute a
query. Queries that exceed this limit will fail. This is an advanced
configuration that helps protect in case the Broker is under heavy load and not
utilizing the data gathered in memory fast enough and leading to OOMs. This
limit can be further reduced at query time using `maxScatterGatherBytes` in the
context. Note that having large limi [...]
|`druid.server.http.maxSubqueryRows`|Maximum number of rows from subqueries
per query. These rows are stored in memory.|100000|
|`druid.server.http.gracefulShutdownTimeout`|The maximum amount of time Jetty
waits after receiving shutdown signal. After this timeout the threads will be
forcefully shutdown. This allows any queries that are executing to
complete.|`PT0S` (do not wait)|
|`druid.server.http.unannouncePropagationDelay`|How long to wait for zookeeper
unannouncements to propagate before shutting down Jetty. This is a minimum and
`druid.server.http.gracefulShutdownTimeout` does not start counting down until
after this period elapses.|`PT0S` (do not wait)|
@@ -1570,7 +1570,7 @@ client has the following configuration options.
|`druid.broker.http.compressionCodec`|Compression codec the Broker uses to
communicate with Historical and real-time processes. May be "gzip" or
"identity".|`gzip`|
|`druid.broker.http.readTimeout`|The timeout for data reads from Historical
servers and real-time tasks.|`PT15M`|
|`druid.broker.http.unusedConnectionTimeout`|The timeout for idle connections
in connection pool. The connection in the pool will be closed after this
timeout and a new one will be established. This timeout should be less than
`druid.broker.http.readTimeout`. Set this timeout = ~90% of
`druid.broker.http.readTimeout`|`PT4M`|
-|`druid.broker.http.maxQueuedBytes`|Maximum number of bytes queued per query
before exerting backpressure on the channel to the data server. Similar to
`druid.server.http.maxScatterGatherBytes`, except unlike that configuration,
this one will trigger backpressure rather than query failure. Zero means
disabled. Can be overridden by the ["maxQueuedBytes" query context
parameter](../querying/query-context.md).|`0` (disabled)|
+|`druid.broker.http.maxQueuedBytes`|Maximum number of bytes queued per query
before exerting backpressure on the channel to the data server. Similar to
`druid.server.http.maxScatterGatherBytes`, except unlike that configuration,
this one will trigger backpressure rather than query failure. Zero means
disabled. Can be overridden by the ["maxQueuedBytes" query context
parameter](../querying/query-context.md). Human-readable format is supported,
see [here](human-readable-byte.md). |`0` (disabled)|
|`druid.broker.http.numMaxThreads`|Maximum number of I/O worker
threads|`max(10, ((number of cores * 17) / 16 + 2) + 30)`|
##### Retry Policy
@@ -1587,7 +1587,7 @@ The broker uses processing configs for nested groupBy
queries.
|Property|Description|Default|
|--------|-----------|-------|
-|`druid.processing.buffer.sizeBytes`|This specifies a buffer size for the
storage of intermediate results. The computation engine in both the Historical
and Realtime processes will use a scratch buffer of this size to do all of
their intermediate computations off-heap. Larger values allow for more
aggregations in a single pass over the data while smaller values can require
more passes depending on the query that is being executed.|auto (max 1GB)|
+|`druid.processing.buffer.sizeBytes`|This specifies a buffer size for the
storage of intermediate results. The computation engine in both the Historical
and Realtime processes will use a scratch buffer of this size to do all of
their intermediate computations off-heap. Larger values allow for more
aggregations in a single pass over the data while smaller values can require
more passes depending on the query that is being executed. It can be configured
as described [here](human-readabl [...]
|`druid.processing.buffer.poolCacheMaxCount`|Processing buffer pool caches the
buffers for later use; this is the maximum count the cache will grow to. Note that
the pool can create more buffers than it can cache if necessary.|Integer.MAX_VALUE|
|`druid.processing.formatString`|Realtime and Historical processes use this
format string to name their processing threads.|processing-%s|
|`druid.processing.numMergeBuffers`|The number of direct memory buffers
available for merging query results. The buffers are sized by
`druid.processing.buffer.sizeBytes`. This property is effectively a concurrency
limit for queries that require merging buffers. If you are using any queries
that require merge buffers (currently, just groupBy v2) then you should have at
least two of these.|`max(2, druid.processing.numThreads / 4)`|
@@ -1709,7 +1709,7 @@ Below are the configuration options known to this module:
|`runtime.properties`|Description|Default|
|--------------------|-----------|-------|
|`druid.cache.type`| Set this to `caffeine` or leave out parameter|`caffeine`|
-|`druid.cache.sizeInBytes`|The maximum size of the cache in bytes on
heap.|min(1GB, Runtime.maxMemory / 10)|
+|`druid.cache.sizeInBytes`|The maximum size of the cache in bytes on heap. It
can be configured as described [here](human-readable-byte.md).|min(1GiB,
Runtime.maxMemory / 10)|
|`druid.cache.expireAfter`|The time (in ms) after an access for which a cache
entry may be expired|None (no time limit)|
|`druid.cache.cacheExecutorFactory`|The executor factory to use for Caffeine
maintenance. One of `COMMON_FJP`, `SINGLE_THREAD`, or
`SAME_THREAD`|ForkJoinPool common pool (`COMMON_FJP`)|
|`druid.cache.evictOnClose`|If a close of a namespace (ex: removing a segment
from a process) should cause an eager eviction of associated cache
values|`false`|
diff --git a/examples/conf/druid/cluster/data/historical/runtime.properties
b/examples/conf/druid/cluster/data/historical/runtime.properties
index 326e6ee..5f6cc13 100644
--- a/examples/conf/druid/cluster/data/historical/runtime.properties
+++ b/examples/conf/druid/cluster/data/historical/runtime.properties
@@ -24,17 +24,17 @@ druid.plaintextPort=8083
druid.server.http.numThreads=60
# Processing threads and buffers
-druid.processing.buffer.sizeBytes=500000000
+druid.processing.buffer.sizeBytes=500MiB
druid.processing.numMergeBuffers=4
druid.processing.numThreads=15
druid.processing.tmpDir=var/druid/processing
# Segment storage
-druid.segmentCache.locations=[{"path":"var/druid/segment-cache","maxSize":300000000000}]
-druid.server.maxSize=300000000000
+druid.segmentCache.locations=[{"path":"var/druid/segment-cache","maxSize":"300g"}]
+druid.server.maxSize=300g
# Query cache
druid.historical.cache.useCache=true
druid.historical.cache.populateCache=true
druid.cache.type=caffeine
-druid.cache.sizeInBytes=256000000
+druid.cache.sizeInBytes=256MiB
diff --git a/examples/conf/druid/cluster/data/middleManager/runtime.properties
b/examples/conf/druid/cluster/data/middleManager/runtime.properties
index 4101ebf..564b98e 100644
--- a/examples/conf/druid/cluster/data/middleManager/runtime.properties
+++ b/examples/conf/druid/cluster/data/middleManager/runtime.properties
@@ -32,7 +32,7 @@ druid.server.http.numThreads=60
# Processing threads and buffers on Peons
druid.indexer.fork.property.druid.processing.numMergeBuffers=2
-druid.indexer.fork.property.druid.processing.buffer.sizeBytes=100000000
+druid.indexer.fork.property.druid.processing.buffer.sizeBytes=100MiB
druid.indexer.fork.property.druid.processing.numThreads=1
# Hadoop indexing
diff --git a/examples/conf/druid/cluster/query/broker/runtime.properties
b/examples/conf/druid/cluster/query/broker/runtime.properties
index 6873025..eda4391 100644
--- a/examples/conf/druid/cluster/query/broker/runtime.properties
+++ b/examples/conf/druid/cluster/query/broker/runtime.properties
@@ -25,10 +25,10 @@ druid.server.http.numThreads=60
# HTTP client settings
druid.broker.http.numConnections=50
-druid.broker.http.maxQueuedBytes=10000000
+druid.broker.http.maxQueuedBytes=10MiB
# Processing threads and buffers
-druid.processing.buffer.sizeBytes=500000000
+druid.processing.buffer.sizeBytes=500MiB
druid.processing.numMergeBuffers=6
druid.processing.numThreads=1
druid.processing.tmpDir=var/druid/processing
diff --git a/examples/conf/druid/single-server/large/broker/runtime.properties
b/examples/conf/druid/single-server/large/broker/runtime.properties
index d32929c..e7338a6 100644
--- a/examples/conf/druid/single-server/large/broker/runtime.properties
+++ b/examples/conf/druid/single-server/large/broker/runtime.properties
@@ -25,10 +25,10 @@ druid.server.http.numThreads=60
# HTTP client settings
druid.broker.http.numConnections=50
-druid.broker.http.maxQueuedBytes=10000000
+druid.broker.http.maxQueuedBytes=10MiB
# Processing threads and buffers
-druid.processing.buffer.sizeBytes=500000000
+druid.processing.buffer.sizeBytes=500MiB
druid.processing.numMergeBuffers=16
druid.processing.numThreads=1
druid.processing.tmpDir=var/druid/processing
diff --git
a/examples/conf/druid/single-server/large/historical/runtime.properties
b/examples/conf/druid/single-server/large/historical/runtime.properties
index 540fba6..137b535 100644
--- a/examples/conf/druid/single-server/large/historical/runtime.properties
+++ b/examples/conf/druid/single-server/large/historical/runtime.properties
@@ -24,17 +24,17 @@ druid.plaintextPort=8083
druid.server.http.numThreads=60
# Processing threads and buffers
-druid.processing.buffer.sizeBytes=500000000
+druid.processing.buffer.sizeBytes=500MiB
druid.processing.numMergeBuffers=8
druid.processing.numThreads=31
druid.processing.tmpDir=var/druid/processing
# Segment storage
-druid.segmentCache.locations=[{"path":"var/druid/segment-cache","maxSize":300000000000}]
-druid.server.maxSize=300000000000
+druid.segmentCache.locations=[{"path":"var/druid/segment-cache","maxSize":"300g"}]
+druid.server.maxSize=300g
# Query cache
druid.historical.cache.useCache=true
druid.historical.cache.populateCache=true
druid.cache.type=caffeine
-druid.cache.sizeInBytes=512000000
+druid.cache.sizeInBytes=512MiB
diff --git
a/examples/conf/druid/single-server/large/middleManager/runtime.properties
b/examples/conf/druid/single-server/large/middleManager/runtime.properties
index 0583b52..2c1cf07 100644
--- a/examples/conf/druid/single-server/large/middleManager/runtime.properties
+++ b/examples/conf/druid/single-server/large/middleManager/runtime.properties
@@ -32,7 +32,7 @@ druid.server.http.numThreads=60
# Processing threads and buffers on Peons
druid.indexer.fork.property.druid.processing.numMergeBuffers=2
-druid.indexer.fork.property.druid.processing.buffer.sizeBytes=100000000
+druid.indexer.fork.property.druid.processing.buffer.sizeBytes=100MiB
druid.indexer.fork.property.druid.processing.numThreads=1
# Hadoop indexing
diff --git a/examples/conf/druid/single-server/medium/broker/runtime.properties
b/examples/conf/druid/single-server/medium/broker/runtime.properties
index 5681b8a..b3f4e5f 100644
--- a/examples/conf/druid/single-server/medium/broker/runtime.properties
+++ b/examples/conf/druid/single-server/medium/broker/runtime.properties
@@ -25,10 +25,10 @@ druid.server.http.numThreads=60
# HTTP client settings
druid.broker.http.numConnections=50
-druid.broker.http.maxQueuedBytes=10000000
+druid.broker.http.maxQueuedBytes=10MiB
# Processing threads and buffers
-druid.processing.buffer.sizeBytes=500000000
+druid.processing.buffer.sizeBytes=500MiB
druid.processing.numMergeBuffers=4
druid.processing.numThreads=1
druid.processing.tmpDir=var/druid/processing
diff --git
a/examples/conf/druid/single-server/medium/historical/runtime.properties
b/examples/conf/druid/single-server/medium/historical/runtime.properties
index 326e6ee..5f6cc13 100644
--- a/examples/conf/druid/single-server/medium/historical/runtime.properties
+++ b/examples/conf/druid/single-server/medium/historical/runtime.properties
@@ -24,17 +24,17 @@ druid.plaintextPort=8083
druid.server.http.numThreads=60
# Processing threads and buffers
-druid.processing.buffer.sizeBytes=500000000
+druid.processing.buffer.sizeBytes=500MiB
druid.processing.numMergeBuffers=4
druid.processing.numThreads=15
druid.processing.tmpDir=var/druid/processing
# Segment storage
-druid.segmentCache.locations=[{"path":"var/druid/segment-cache","maxSize":300000000000}]
-druid.server.maxSize=300000000000
+druid.segmentCache.locations=[{"path":"var/druid/segment-cache","maxSize":"300g"}]
+druid.server.maxSize=300g
# Query cache
druid.historical.cache.useCache=true
druid.historical.cache.populateCache=true
druid.cache.type=caffeine
-druid.cache.sizeInBytes=256000000
+druid.cache.sizeInBytes=256MiB
diff --git
a/examples/conf/druid/single-server/medium/middleManager/runtime.properties
b/examples/conf/druid/single-server/medium/middleManager/runtime.properties
index 4101ebf..564b98e 100644
--- a/examples/conf/druid/single-server/medium/middleManager/runtime.properties
+++ b/examples/conf/druid/single-server/medium/middleManager/runtime.properties
@@ -32,7 +32,7 @@ druid.server.http.numThreads=60
# Processing threads and buffers on Peons
druid.indexer.fork.property.druid.processing.numMergeBuffers=2
-druid.indexer.fork.property.druid.processing.buffer.sizeBytes=100000000
+druid.indexer.fork.property.druid.processing.buffer.sizeBytes=100MiB
druid.indexer.fork.property.druid.processing.numThreads=1
# Hadoop indexing
diff --git
a/examples/conf/druid/single-server/micro-quickstart/broker/runtime.properties
b/examples/conf/druid/single-server/micro-quickstart/broker/runtime.properties
index cad3373..735a6c3 100644
---
a/examples/conf/druid/single-server/micro-quickstart/broker/runtime.properties
+++
b/examples/conf/druid/single-server/micro-quickstart/broker/runtime.properties
@@ -25,10 +25,10 @@ druid.server.http.numThreads=12
# HTTP client settings
druid.broker.http.numConnections=10
-druid.broker.http.maxQueuedBytes=5000000
+druid.broker.http.maxQueuedBytes=5MiB
# Processing threads and buffers
-druid.processing.buffer.sizeBytes=100000000
+druid.processing.buffer.sizeBytes=100MiB
druid.processing.numMergeBuffers=2
druid.processing.numThreads=1
druid.processing.tmpDir=var/druid/processing
diff --git
a/examples/conf/druid/single-server/micro-quickstart/historical/runtime.properties
b/examples/conf/druid/single-server/micro-quickstart/historical/runtime.properties
index 594abaf..c60d966 100644
---
a/examples/conf/druid/single-server/micro-quickstart/historical/runtime.properties
+++
b/examples/conf/druid/single-server/micro-quickstart/historical/runtime.properties
@@ -24,17 +24,17 @@ druid.plaintextPort=8083
druid.server.http.numThreads=12
# Processing threads and buffers
-druid.processing.buffer.sizeBytes=200000000
+druid.processing.buffer.sizeBytes=200MiB
druid.processing.numMergeBuffers=2
druid.processing.numThreads=2
druid.processing.tmpDir=var/druid/processing
# Segment storage
-druid.segmentCache.locations=[{"path":"var/druid/segment-cache","maxSize":300000000000}]
-druid.server.maxSize=300000000000
+druid.segmentCache.locations=[{"path":"var/druid/segment-cache","maxSize":"300g"}]
+druid.server.maxSize=300g
# Query cache
druid.historical.cache.useCache=true
druid.historical.cache.populateCache=true
druid.cache.type=caffeine
-druid.cache.sizeInBytes=10000000
+druid.cache.sizeInBytes=10MiB
diff --git
a/examples/conf/druid/single-server/micro-quickstart/middleManager/runtime.properties
b/examples/conf/druid/single-server/micro-quickstart/middleManager/runtime.properties
index 280787b..1f8ae46 100644
---
a/examples/conf/druid/single-server/micro-quickstart/middleManager/runtime.properties
+++
b/examples/conf/druid/single-server/micro-quickstart/middleManager/runtime.properties
@@ -32,7 +32,7 @@ druid.server.http.numThreads=12
# Processing threads and buffers on Peons
druid.indexer.fork.property.druid.processing.numMergeBuffers=2
-druid.indexer.fork.property.druid.processing.buffer.sizeBytes=100000000
+druid.indexer.fork.property.druid.processing.buffer.sizeBytes=100MiB
druid.indexer.fork.property.druid.processing.numThreads=1
# Hadoop indexing
diff --git
a/examples/conf/druid/single-server/nano-quickstart/broker/runtime.properties
b/examples/conf/druid/single-server/nano-quickstart/broker/runtime.properties
index 21805ea..f88fcbd 100644
---
a/examples/conf/druid/single-server/nano-quickstart/broker/runtime.properties
+++
b/examples/conf/druid/single-server/nano-quickstart/broker/runtime.properties
@@ -25,10 +25,10 @@ druid.server.http.numThreads=6
# HTTP client settings
druid.broker.http.numConnections=5
-druid.broker.http.maxQueuedBytes=5000000
+druid.broker.http.maxQueuedBytes=5MiB
# Processing threads and buffers
-druid.processing.buffer.sizeBytes=50000000
+druid.processing.buffer.sizeBytes=50MiB
druid.processing.numMergeBuffers=2
druid.processing.numThreads=1
druid.processing.tmpDir=var/druid/processing
diff --git
a/examples/conf/druid/single-server/nano-quickstart/historical/runtime.properties
b/examples/conf/druid/single-server/nano-quickstart/historical/runtime.properties
index f2e8d3b..b467208 100644
---
a/examples/conf/druid/single-server/nano-quickstart/historical/runtime.properties
+++
b/examples/conf/druid/single-server/nano-quickstart/historical/runtime.properties
@@ -24,17 +24,17 @@ druid.plaintextPort=8083
druid.server.http.numThreads=6
# Processing threads and buffers
-druid.processing.buffer.sizeBytes=50000000
+druid.processing.buffer.sizeBytes=50MiB
druid.processing.numMergeBuffers=2
druid.processing.numThreads=1
druid.processing.tmpDir=var/druid/processing
# Segment storage
-druid.segmentCache.locations=[{"path":"var/druid/segment-cache","maxSize":300000000000}]
-druid.server.maxSize=300000000000
+druid.segmentCache.locations=[{"path":"var/druid/segment-cache","maxSize":"300g"}]
+druid.server.maxSize=300g
# Query cache
druid.historical.cache.useCache=true
druid.historical.cache.populateCache=true
druid.cache.type=caffeine
-druid.cache.sizeInBytes=50000000
+druid.cache.sizeInBytes=50MiB
diff --git
a/examples/conf/druid/single-server/nano-quickstart/middleManager/runtime.properties
b/examples/conf/druid/single-server/nano-quickstart/middleManager/runtime.properties
index c8e5f57..85e8c4c 100644
---
a/examples/conf/druid/single-server/nano-quickstart/middleManager/runtime.properties
+++
b/examples/conf/druid/single-server/nano-quickstart/middleManager/runtime.properties
@@ -32,7 +32,7 @@ druid.server.http.numThreads=6
# Processing threads and buffers on Peons
druid.indexer.fork.property.druid.processing.numMergeBuffers=2
-druid.indexer.fork.property.druid.processing.buffer.sizeBytes=25000000
+druid.indexer.fork.property.druid.processing.buffer.sizeBytes=25MiB
druid.indexer.fork.property.druid.processing.numThreads=1
# Hadoop indexing
diff --git a/examples/conf/druid/single-server/small/broker/runtime.properties
b/examples/conf/druid/single-server/small/broker/runtime.properties
index 072c9e9..238ed1e 100644
--- a/examples/conf/druid/single-server/small/broker/runtime.properties
+++ b/examples/conf/druid/single-server/small/broker/runtime.properties
@@ -25,10 +25,10 @@ druid.server.http.numThreads=50
# HTTP client settings
druid.broker.http.numConnections=40
-druid.broker.http.maxQueuedBytes=5000000
+druid.broker.http.maxQueuedBytes=5MiB
# Processing threads and buffers
-druid.processing.buffer.sizeBytes=500000000
+druid.processing.buffer.sizeBytes=500MiB
druid.processing.numMergeBuffers=2
druid.processing.numThreads=1
druid.processing.tmpDir=var/druid/processing
diff --git
a/examples/conf/druid/single-server/small/historical/runtime.properties
b/examples/conf/druid/single-server/small/historical/runtime.properties
index 6cfc704..9d2e290 100644
--- a/examples/conf/druid/single-server/small/historical/runtime.properties
+++ b/examples/conf/druid/single-server/small/historical/runtime.properties
@@ -24,17 +24,17 @@ druid.plaintextPort=8083
druid.server.http.numThreads=50
# Processing threads and buffers
-druid.processing.buffer.sizeBytes=500000000
+druid.processing.buffer.sizeBytes=500MiB
druid.processing.numMergeBuffers=2
druid.processing.numThreads=7
druid.processing.tmpDir=var/druid/processing
# Segment storage
-druid.segmentCache.locations=[{"path":"var/druid/segment-cache","maxSize":300000000000}]
-druid.server.maxSize=300000000000
+druid.segmentCache.locations=[{"path":"var/druid/segment-cache","maxSize":"300g"}]
+druid.server.maxSize=300g
# Query cache
druid.historical.cache.useCache=true
druid.historical.cache.populateCache=true
druid.cache.type=caffeine
-druid.cache.sizeInBytes=256000000
+druid.cache.sizeInBytes=256MiB
diff --git
a/examples/conf/druid/single-server/small/middleManager/runtime.properties
b/examples/conf/druid/single-server/small/middleManager/runtime.properties
index f9a8bae..dd902d3 100644
--- a/examples/conf/druid/single-server/small/middleManager/runtime.properties
+++ b/examples/conf/druid/single-server/small/middleManager/runtime.properties
@@ -32,7 +32,7 @@ druid.server.http.numThreads=50
# Processing threads and buffers on Peons
druid.indexer.fork.property.druid.processing.numMergeBuffers=2
-druid.indexer.fork.property.druid.processing.buffer.sizeBytes=100000000
+druid.indexer.fork.property.druid.processing.buffer.sizeBytes=100MiB
druid.indexer.fork.property.druid.processing.numThreads=1
# Hadoop indexing
diff --git a/examples/conf/druid/single-server/xlarge/broker/runtime.properties
b/examples/conf/druid/single-server/xlarge/broker/runtime.properties
index d32929c..e7338a6 100644
--- a/examples/conf/druid/single-server/xlarge/broker/runtime.properties
+++ b/examples/conf/druid/single-server/xlarge/broker/runtime.properties
@@ -25,10 +25,10 @@ druid.server.http.numThreads=60
# HTTP client settings
druid.broker.http.numConnections=50
-druid.broker.http.maxQueuedBytes=10000000
+druid.broker.http.maxQueuedBytes=10MiB
# Processing threads and buffers
-druid.processing.buffer.sizeBytes=500000000
+druid.processing.buffer.sizeBytes=500MiB
druid.processing.numMergeBuffers=16
druid.processing.numThreads=1
druid.processing.tmpDir=var/druid/processing
diff --git
a/examples/conf/druid/single-server/xlarge/historical/runtime.properties
b/examples/conf/druid/single-server/xlarge/historical/runtime.properties
index c322fda..401acbb 100644
--- a/examples/conf/druid/single-server/xlarge/historical/runtime.properties
+++ b/examples/conf/druid/single-server/xlarge/historical/runtime.properties
@@ -24,17 +24,17 @@ druid.plaintextPort=8083
druid.server.http.numThreads=60
# Processing threads and buffers
-druid.processing.buffer.sizeBytes=500000000
+druid.processing.buffer.sizeBytes=500MiB
druid.processing.numMergeBuffers=16
druid.processing.numThreads=63
druid.processing.tmpDir=var/druid/processing
# Segment storage
-druid.segmentCache.locations=[{"path":"var/druid/segment-cache","maxSize":300000000000}]
-druid.server.maxSize=300000000000
+druid.segmentCache.locations=[{"path":"var/druid/segment-cache","maxSize":"300g"}]
+druid.server.maxSize=300g
# Query cache
druid.historical.cache.useCache=true
druid.historical.cache.populateCache=true
druid.cache.type=caffeine
-druid.cache.sizeInBytes=2000000000
+druid.cache.sizeInBytes=2GiB
diff --git
a/examples/conf/druid/single-server/xlarge/middleManager/runtime.properties
b/examples/conf/druid/single-server/xlarge/middleManager/runtime.properties
index 28732de..8a5924b 100644
--- a/examples/conf/druid/single-server/xlarge/middleManager/runtime.properties
+++ b/examples/conf/druid/single-server/xlarge/middleManager/runtime.properties
@@ -32,7 +32,7 @@ druid.server.http.numThreads=60
# Processing threads and buffers on Peons
druid.indexer.fork.property.druid.processing.numMergeBuffers=2
-druid.indexer.fork.property.druid.processing.buffer.sizeBytes=100000000
+druid.indexer.fork.property.druid.processing.buffer.sizeBytes=100MiB
druid.indexer.fork.property.druid.processing.numThreads=1
# Hadoop indexing
diff --git
a/indexing-service/src/main/java/org/apache/druid/indexing/overlord/config/RemoteTaskRunnerConfig.java
b/indexing-service/src/main/java/org/apache/druid/indexing/overlord/config/RemoteTaskRunnerConfig.java
index 9d6ad79..e0de4e3 100644
---
a/indexing-service/src/main/java/org/apache/druid/indexing/overlord/config/RemoteTaskRunnerConfig.java
+++
b/indexing-service/src/main/java/org/apache/druid/indexing/overlord/config/RemoteTaskRunnerConfig.java
@@ -21,6 +21,8 @@ package org.apache.druid.indexing.overlord.config;
import com.fasterxml.jackson.annotation.JsonProperty;
import org.apache.druid.curator.CuratorUtils;
+import org.apache.druid.java.util.common.HumanReadableBytes;
+import org.apache.druid.java.util.common.HumanReadableBytesRange;
import org.joda.time.Period;
import javax.validation.constraints.Max;
@@ -42,8 +44,8 @@ public class RemoteTaskRunnerConfig extends
WorkerTaskRunnerConfig
private Period taskCleanupTimeout = new Period("PT15M");
@JsonProperty
- @Min(10 * 1024)
- private int maxZnodeBytes = CuratorUtils.DEFAULT_MAX_ZNODE_BYTES;
+ @HumanReadableBytesRange(min = 10 * 1024, max = Integer.MAX_VALUE)
+ private HumanReadableBytes maxZnodeBytes =
HumanReadableBytes.valueOf(CuratorUtils.DEFAULT_MAX_ZNODE_BYTES);
@JsonProperty
private Period taskShutdownLinkTimeout = new Period("PT1M");
@@ -81,7 +83,7 @@ public class RemoteTaskRunnerConfig extends
WorkerTaskRunnerConfig
public int getMaxZnodeBytes()
{
- return maxZnodeBytes;
+ return maxZnodeBytes.getBytesInInt();
}
public Period getTaskShutdownLinkTimeout()
@@ -89,7 +91,6 @@ public class RemoteTaskRunnerConfig extends
WorkerTaskRunnerConfig
return taskShutdownLinkTimeout;
}
-
public int getPendingTasksRunnerNumThreads()
{
return pendingTasksRunnerNumThreads;
@@ -132,7 +133,7 @@ public class RemoteTaskRunnerConfig extends
WorkerTaskRunnerConfig
RemoteTaskRunnerConfig that = (RemoteTaskRunnerConfig) o;
- if (maxZnodeBytes != that.maxZnodeBytes) {
+ if (!maxZnodeBytes.equals(that.maxZnodeBytes)) {
return false;
}
if (pendingTasksRunnerNumThreads != that.pendingTasksRunnerNumThreads) {
@@ -169,7 +170,7 @@ public class RemoteTaskRunnerConfig extends
WorkerTaskRunnerConfig
int result = taskAssignmentTimeout.hashCode();
result = 31 * result + taskCleanupTimeout.hashCode();
result = 31 * result + getMinWorkerVersion().hashCode();
- result = 31 * result + maxZnodeBytes;
+ result = 31 * result + maxZnodeBytes.getBytesInInt();
result = 31 * result + taskShutdownLinkTimeout.hashCode();
result = 31 * result + pendingTasksRunnerNumThreads;
result = 31 * result + maxRetriesBeforeBlacklist;
diff --git
a/processing/src/main/java/org/apache/druid/query/DruidProcessingConfig.java
b/processing/src/main/java/org/apache/druid/query/DruidProcessingConfig.java
index ce76461..7a9168c 100644
--- a/processing/src/main/java/org/apache/druid/query/DruidProcessingConfig.java
+++ b/processing/src/main/java/org/apache/druid/query/DruidProcessingConfig.java
@@ -19,6 +19,7 @@
package org.apache.druid.query;
+import org.apache.druid.java.util.common.HumanReadableBytes;
import org.apache.druid.java.util.common.concurrent.ExecutorServiceConfig;
import org.apache.druid.java.util.common.guava.ParallelMergeCombiningSequence;
import org.apache.druid.java.util.common.logger.Logger;
@@ -40,14 +41,14 @@ public abstract class DruidProcessingConfig extends
ExecutorServiceConfig implem
private AtomicReference<Integer> computedBufferSizeBytes = new
AtomicReference<>();
@Config({"druid.computation.buffer.size", "${base_path}.buffer.sizeBytes"})
- public int intermediateComputeSizeBytesConfigured()
+ public HumanReadableBytes intermediateComputeSizeBytesConfigured()
{
- return DEFAULT_PROCESSING_BUFFER_SIZE_BYTES;
+ return HumanReadableBytes.valueOf(DEFAULT_PROCESSING_BUFFER_SIZE_BYTES);
}
public int intermediateComputeSizeBytes()
{
- int sizeBytesConfigured = intermediateComputeSizeBytesConfigured();
+ int sizeBytesConfigured =
intermediateComputeSizeBytesConfigured().getBytesInInt();
if (sizeBytesConfigured != DEFAULT_PROCESSING_BUFFER_SIZE_BYTES) {
return sizeBytesConfigured;
} else if (computedBufferSizeBytes.get() != null) {
diff --git
a/server/src/main/java/org/apache/druid/client/DruidServerConfig.java
b/server/src/main/java/org/apache/druid/client/DruidServerConfig.java
index 2d336a7..3b7e8f2 100644
--- a/server/src/main/java/org/apache/druid/client/DruidServerConfig.java
+++ b/server/src/main/java/org/apache/druid/client/DruidServerConfig.java
@@ -21,8 +21,9 @@ package org.apache.druid.client;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.google.common.collect.Sets;
+import org.apache.druid.java.util.common.HumanReadableBytes;
+import org.apache.druid.java.util.common.HumanReadableBytesRange;
-import javax.validation.constraints.Min;
import javax.validation.constraints.NotNull;
import java.util.Set;
@@ -31,8 +32,8 @@ import java.util.Set;
public class DruidServerConfig
{
@JsonProperty
- @Min(0)
- private long maxSize = 0;
+ @HumanReadableBytesRange(min = 0)
+ private HumanReadableBytes maxSize = HumanReadableBytes.ZERO;
@JsonProperty
private String tier = DruidServer.DEFAULT_TIER;
@@ -46,7 +47,7 @@ public class DruidServerConfig
public long getMaxSize()
{
- return maxSize;
+ return maxSize.getBytes();
}
public String getTier()
diff --git
a/server/src/main/java/org/apache/druid/client/cache/CaffeineCacheConfig.java
b/server/src/main/java/org/apache/druid/client/cache/CaffeineCacheConfig.java
index e828a62..8d4ed10 100644
---
a/server/src/main/java/org/apache/druid/client/cache/CaffeineCacheConfig.java
+++
b/server/src/main/java/org/apache/druid/client/cache/CaffeineCacheConfig.java
@@ -20,6 +20,7 @@
package org.apache.druid.client.cache;
import com.fasterxml.jackson.annotation.JsonProperty;
+import org.apache.druid.java.util.common.HumanReadableBytes;
import java.util.concurrent.Executor;
@@ -29,7 +30,7 @@ public class CaffeineCacheConfig
private long expireAfter = -1;
@JsonProperty
- private long sizeInBytes = -1;
+ private HumanReadableBytes sizeInBytes = HumanReadableBytes.valueOf(-1);
@JsonProperty
// Do not use COMMON_FJP unless you're running 8u60 or higher
@@ -46,7 +47,7 @@ public class CaffeineCacheConfig
public long getSizeInBytes()
{
- return sizeInBytes;
+ return sizeInBytes.getBytes();
}
public Executor createExecutor()
diff --git
a/server/src/main/java/org/apache/druid/guice/http/DruidHttpClientConfig.java
b/server/src/main/java/org/apache/druid/guice/http/DruidHttpClientConfig.java
index 7e09f8e..b475f64 100644
---
a/server/src/main/java/org/apache/druid/guice/http/DruidHttpClientConfig.java
+++
b/server/src/main/java/org/apache/druid/guice/http/DruidHttpClientConfig.java
@@ -20,6 +20,7 @@
package org.apache.druid.guice.http;
import com.fasterxml.jackson.annotation.JsonProperty;
+import org.apache.druid.java.util.common.HumanReadableBytes;
import org.apache.druid.java.util.common.logger.Logger;
import org.apache.druid.utils.JvmUtils;
import org.joda.time.Duration;
@@ -64,7 +65,7 @@ public class DruidHttpClientConfig
* respected by CachingClusteredClient (broker -> data server communication).
*/
@JsonProperty
- private long maxQueuedBytes = 0L;
+ private HumanReadableBytes maxQueuedBytes = HumanReadableBytes.ZERO;
public int getNumConnections()
{
@@ -112,6 +113,6 @@ public class DruidHttpClientConfig
public long getMaxQueuedBytes()
{
- return maxQueuedBytes;
+ return maxQueuedBytes.getBytes();
}
}
diff --git
a/server/src/main/java/org/apache/druid/segment/loading/StorageLocationConfig.java
b/server/src/main/java/org/apache/druid/segment/loading/StorageLocationConfig.java
index bbfd7ed..ee5196b 100644
---
a/server/src/main/java/org/apache/druid/segment/loading/StorageLocationConfig.java
+++
b/server/src/main/java/org/apache/druid/segment/loading/StorageLocationConfig.java
@@ -22,6 +22,7 @@ package org.apache.druid.segment.loading;
import com.fasterxml.jackson.annotation.JsonCreator;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.google.common.base.Preconditions;
+import org.apache.druid.java.util.common.HumanReadableBytes;
import javax.annotation.Nullable;
import java.io.File;
@@ -38,12 +39,21 @@ public class StorageLocationConfig
@JsonCreator
public StorageLocationConfig(
@JsonProperty("path") File path,
- @JsonProperty("maxSize") @Nullable Long maxSize,
+ @JsonProperty("maxSize") @Nullable HumanReadableBytes maxSize,
@JsonProperty("freeSpacePercent") @Nullable Double freeSpacePercent
)
{
+ this(path, maxSize == null ? Long.MAX_VALUE : maxSize.getBytes(),
freeSpacePercent);
+ }
+
+ public StorageLocationConfig(
+ File path,
+ long maxSize,
+ Double freeSpacePercent
+ )
+ {
this.path = Preconditions.checkNotNull(path, "path");
- this.maxSize = maxSize == null ? Long.MAX_VALUE : maxSize;
+ this.maxSize = maxSize;
this.freeSpacePercent = freeSpacePercent;
Preconditions.checkArgument(this.maxSize > 0, "maxSize[%s] should be
positive", this.maxSize);
Preconditions.checkArgument(
diff --git
a/server/src/main/java/org/apache/druid/server/initialization/ServerConfig.java
b/server/src/main/java/org/apache/druid/server/initialization/ServerConfig.java
index e4506f0..ac9576b 100644
---
a/server/src/main/java/org/apache/druid/server/initialization/ServerConfig.java
+++
b/server/src/main/java/org/apache/druid/server/initialization/ServerConfig.java
@@ -21,6 +21,8 @@ package org.apache.druid.server.initialization;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.google.common.collect.ImmutableList;
+import org.apache.druid.java.util.common.HumanReadableBytes;
+import org.apache.druid.java.util.common.HumanReadableBytesRange;
import org.apache.druid.utils.JvmUtils;
import org.joda.time.Period;
@@ -67,7 +69,7 @@ public class ServerConfig
this.enableRequestLimit = enableRequestLimit;
this.maxIdleTime = maxIdleTime;
this.defaultQueryTimeout = defaultQueryTimeout;
- this.maxScatterGatherBytes = maxScatterGatherBytes;
+ this.maxScatterGatherBytes =
HumanReadableBytes.valueOf(maxScatterGatherBytes);
this.maxSubqueryRows = maxSubqueryRows;
this.maxQueryTimeout = maxQueryTimeout;
this.maxRequestHeaderSize = maxRequestHeaderSize;
@@ -104,8 +106,9 @@ public class ServerConfig
private long defaultQueryTimeout = TimeUnit.MINUTES.toMillis(5);
@JsonProperty
- @Min(1)
- private long maxScatterGatherBytes = Long.MAX_VALUE;
+ @NotNull
+ @HumanReadableBytesRange(min = 1)
+ private HumanReadableBytes maxScatterGatherBytes =
HumanReadableBytes.valueOf(Long.MAX_VALUE);
@JsonProperty
@Min(1)
@@ -169,7 +172,7 @@ public class ServerConfig
public long getMaxScatterGatherBytes()
{
- return maxScatterGatherBytes;
+ return maxScatterGatherBytes.getBytes();
}
public int getMaxSubqueryRows()
@@ -232,7 +235,7 @@ public class ServerConfig
queueSize == that.queueSize &&
enableRequestLimit == that.enableRequestLimit &&
defaultQueryTimeout == that.defaultQueryTimeout &&
- maxScatterGatherBytes == that.maxScatterGatherBytes &&
+ maxScatterGatherBytes.equals(that.maxScatterGatherBytes) &&
maxSubqueryRows == that.maxSubqueryRows &&
maxQueryTimeout == that.maxQueryTimeout &&
maxRequestHeaderSize == that.maxRequestHeaderSize &&
diff --git a/website/.spelling b/website/.spelling
index ad34858..50e27f0 100644
--- a/website/.spelling
+++ b/website/.spelling
@@ -1555,6 +1555,9 @@ dsql
- ../docs/tutorials/tutorial-update-data.md
bear-111
- ../docs/configuration/index.md
+1GiB
+KiB
+GiB
00.000Z
100ms
10ms
@@ -1771,3 +1774,20 @@ CVE-2019-17571
CVE-2019-12399
CVE-2018-17196
bin.tar.gz
+ - ../docs/configuration/human-readable-byte.md
+0s
+1T
+3G
+1_000
+1_000_000
+1_000_000_000
+1_000_000_000_000
+1_000_000_000_000_000
+Giga
+Tera
+Peta
+KiB
+MiB
+GiB
+TiB
+PiB
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]