This is an automated email from the ASF dual-hosted git repository. jihoonson pushed a commit to branch 0.18.0 in repository https://gitbox.apache.org/repos/asf/druid.git
The following commit(s) were added to refs/heads/0.18.0 by this push: new 45b89cc Fix potential integer overflow issues (#9609) (#9673) 45b89cc is described below commit 45b89cc14922ff40fb355496cf985996159a41bb Author: Suneet Saldanha <44787917+sunee...@users.noreply.github.com> AuthorDate: Fri Apr 10 15:43:16 2020 -0700 Fix potential integer overflow issues (#9609) (#9673) ApproximateHistogram - seems unlikely SegmentAnalyzer - unclear if this is an actual issue GenericIndexedWriter - unclear if this is an actual issue IncrementalIndexRow and OnheapIncrementalIndex are non-issues because it's very unlikely for the number of dims to be large enough to hit the overflow condition --- .../druid/query/aggregation/histogram/ApproximateHistogram.java | 2 +- .../main/java/org/apache/druid/query/metadata/SegmentAnalyzer.java | 4 ++-- .../java/org/apache/druid/segment/data/GenericIndexedWriter.java | 6 +++--- .../org/apache/druid/segment/incremental/IncrementalIndexRow.java | 2 +- .../apache/druid/segment/incremental/OnheapIncrementalIndex.java | 4 ++-- 5 files changed, 9 insertions(+), 9 deletions(-) diff --git a/extensions-core/histogram/src/main/java/org/apache/druid/query/aggregation/histogram/ApproximateHistogram.java b/extensions-core/histogram/src/main/java/org/apache/druid/query/aggregation/histogram/ApproximateHistogram.java index 35ac0b4..ed67b8d 100644 --- a/extensions-core/histogram/src/main/java/org/apache/druid/query/aggregation/histogram/ApproximateHistogram.java +++ b/extensions-core/histogram/src/main/java/org/apache/druid/query/aggregation/histogram/ApproximateHistogram.java @@ -1557,7 +1557,7 @@ public class ApproximateHistogram final double s = probabilities[j] * this.count(); int i = 0; - int sum = 0; + long sum = 0; int k = 1; long count; while (k <= this.binCount()) { diff --git a/processing/src/main/java/org/apache/druid/query/metadata/SegmentAnalyzer.java b/processing/src/main/java/org/apache/druid/query/metadata/SegmentAnalyzer.java index 
15acc4c..c5f1800 100644 --- a/processing/src/main/java/org/apache/druid/query/metadata/SegmentAnalyzer.java +++ b/processing/src/main/java/org/apache/druid/query/metadata/SegmentAnalyzer.java @@ -207,8 +207,8 @@ public class SegmentAnalyzer for (int i = 0; i < cardinality; ++i) { String value = bitmapIndex.getValue(i); if (value != null) { - size += StringUtils.estimatedBinaryLengthAsUTF8(value) * bitmapIndex.getBitmap(bitmapIndex.getIndex(value)) - .size(); + size += StringUtils.estimatedBinaryLengthAsUTF8(value) * + ((long) bitmapIndex.getBitmap(bitmapIndex.getIndex(value)).size()); } } } diff --git a/processing/src/main/java/org/apache/druid/segment/data/GenericIndexedWriter.java b/processing/src/main/java/org/apache/druid/segment/data/GenericIndexedWriter.java index 7578506..95bc141 100644 --- a/processing/src/main/java/org/apache/druid/segment/data/GenericIndexedWriter.java +++ b/processing/src/main/java/org/apache/druid/segment/data/GenericIndexedWriter.java @@ -321,10 +321,10 @@ public class GenericIndexedWriter<T> implements Serializer final long numBytesWritten = headerOut.size() + valuesOut.size(); Preconditions.checkState( - headerOut.size() == (numWritten * 4), + headerOut.size() == (numWritten * 4L), "numWritten[%s] number of rows should have [%s] bytes written to headerOut, had[%s]", numWritten, - numWritten * 4, + numWritten * 4L, headerOut.size() ); Preconditions.checkState( @@ -459,7 +459,7 @@ public class GenericIndexedWriter<T> implements Serializer long relativeRefBytes = 0; long relativeNumBytes; try (SmooshedWriter smooshChannel = smoosher - .addWithSmooshedWriter(generateHeaderFileName(filenameBase), numWritten * Integer.BYTES)) { + .addWithSmooshedWriter(generateHeaderFileName(filenameBase), ((long) numWritten) * Integer.BYTES)) { // following block converts long header indexes into int header indexes. 
for (int pos = 0; pos < numWritten; pos++) { diff --git a/processing/src/main/java/org/apache/druid/segment/incremental/IncrementalIndexRow.java b/processing/src/main/java/org/apache/druid/segment/incremental/IncrementalIndexRow.java index 5c4d7b6..987ee5f 100644 --- a/processing/src/main/java/org/apache/druid/segment/incremental/IncrementalIndexRow.java +++ b/processing/src/main/java/org/apache/druid/segment/incremental/IncrementalIndexRow.java @@ -126,7 +126,7 @@ public final class IncrementalIndexRow */ public long estimateBytesInMemory() { - long sizeInBytes = Long.BYTES + Integer.BYTES * dims.length + Long.BYTES + Long.BYTES; + long sizeInBytes = Long.BYTES + ((long) Integer.BYTES) * dims.length + Long.BYTES + Long.BYTES; sizeInBytes += dimsKeySize; return sizeInBytes; } diff --git a/processing/src/main/java/org/apache/druid/segment/incremental/OnheapIncrementalIndex.java b/processing/src/main/java/org/apache/druid/segment/incremental/OnheapIncrementalIndex.java index 2d64346..04cdaba 100644 --- a/processing/src/main/java/org/apache/druid/segment/incremental/OnheapIncrementalIndex.java +++ b/processing/src/main/java/org/apache/druid/segment/incremental/OnheapIncrementalIndex.java @@ -105,10 +105,10 @@ public class OnheapIncrementalIndex extends IncrementalIndex<Aggregator> */ private static long getMaxBytesPerRowForAggregators(IncrementalIndexSchema incrementalIndexSchema) { - long maxAggregatorIntermediateSize = Integer.BYTES * incrementalIndexSchema.getMetrics().length; + long maxAggregatorIntermediateSize = ((long) Integer.BYTES) * incrementalIndexSchema.getMetrics().length; maxAggregatorIntermediateSize += Arrays.stream(incrementalIndexSchema.getMetrics()) .mapToLong(aggregator -> aggregator.getMaxIntermediateSizeWithNulls() - + Long.BYTES * 2) + + Long.BYTES * 2L) .sum(); return maxAggregatorIntermediateSize; } --------------------------------------------------------------------- To unsubscribe, e-mail: commits-unsubscr...@druid.apache.org For additional 
commands, e-mail: commits-h...@druid.apache.org