thomasrebele commented on code in PR #6293:
URL: https://github.com/apache/hive/pull/6293#discussion_r2762868297
##########
ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/stats/FilterSelectivityEstimator.java:
##########
@@ -184,91 +188,284 @@ public Double visitCall(RexCall call) {
return selectivity;
}
+ /**
+ * If the cast can be removed, just return its operand and adjust the
boundaries if necessary.
+ *
+ * <p>
+ * In Hive, if a value cannot be represented by the cast, the result of
the cast is NULL,
+ * and therefore cannot fulfill the predicate. So the possible range of
the values
+ * is limited by the range of possible values of the type.
+ * </p>
+ *
+ * <p>
+ * Special care is taken to support the cast to DECIMAL(precision, scale):
+ * The cast to DECIMAL rounds the value the same way as {@link
RoundingMode#HALF_UP}.
+ * The boundaries are adjusted accordingly, without changing the semantics
of <code>inclusive</code>.
+ * </p>
+ *
+ * @param cast a RexCall of type {@link SqlKind#CAST}
+ * @param tableScan the table that provides the statistics
+ * @param boundaries indexes 0 and 1 are the boundaries of the range
predicate;
+ * indexes 2 and 3, if they exist, will be set to the
boundaries of the type range
+ * @param inclusive whether the respective boundary is inclusive or
exclusive.
+ * @return the operand if the cast can be removed, otherwise the cast itself
+ */
+ private RexNode removeCastIfPossible(RexCall cast, HiveTableScan tableScan,
float[] boundaries, boolean[] inclusive) {
+ RexNode op0 = cast.getOperands().getFirst();
+ if (!(op0 instanceof RexInputRef)) {
+ return cast;
+ }
+ int index = ((RexInputRef) op0).getIndex();
+ final List<ColStatistics> colStats =
tableScan.getColStat(Collections.singletonList(index));
+ if (colStats.isEmpty()) {
+ return cast;
+ }
+
+ // we need to check that the possible values of the input to the cast are
all within the type range of the cast
+ // otherwise the CAST introduces some modulo-like behavior (*)
+ ColStatistics colStat = colStats.getFirst();
+ ColStatistics.Range range = colStat.getRange();
+ if (range == null)
+ return cast;
+ if (range.minValue == null || Double.isNaN(range.minValue.doubleValue()))
+ return cast;
+ if (range.maxValue == null || Double.isNaN(range.maxValue.doubleValue()))
+ return cast;
Review Comment:
I'll fix the `'if' construct must use '{}'s` checkstyle warnings when
updating the commit.
##########
ql/src/test/org/apache/hadoop/hive/ql/optimizer/calcite/stats/TestFilterSelectivityEstimator.java:
##########
@@ -511,6 +599,215 @@ public void
testComputeRangePredicateSelectivityNotBetweenWithNULLS() {
doReturn(Collections.singletonList(stats)).when(tableMock).getColStat(Collections.singletonList(0));
RexNode filter = REX_BUILDER.makeCall(HiveBetween.INSTANCE, boolTrue,
inputRef0, int1, int3);
FilterSelectivityEstimator estimator = new
FilterSelectivityEstimator(scan, mq);
- Assert.assertEquals(0.55, estimator.estimateSelectivity(filter), DELTA);
+ // only the values 4, 5, 6, 7 fulfill the condition NOT BETWEEN 1 AND 3
+ // (the NULL values do not fulfill the condition)
+ Assert.assertEquals(0.2, estimator.estimateSelectivity(filter), DELTA);
+ }
+
+ @Test
+ public void testComputeRangePredicateSelectivityWithCast() {
+ useFieldWithValues("f_numeric", VALUES, KLL);
+ checkSelectivity(3 / 13.f, castAndCompare(TINYINT, GE, int5));
+ checkSelectivity(10 / 13.f, castAndCompare(TINYINT, LT, int5));
+ checkSelectivity(2 / 13.f, castAndCompare(TINYINT, GT, int5));
+ checkSelectivity(11 / 13.f, castAndCompare(TINYINT, LE, int5));
+
+ checkSelectivity(12 / 13f, castAndCompare(TINYINT, GE, int2));
+ checkSelectivity(1 / 13f, castAndCompare(TINYINT, LT, int2));
+ checkSelectivity(5 / 13f, castAndCompare(TINYINT, GT, int2));
+ checkSelectivity(8 / 13f, castAndCompare(TINYINT, LE, int2));
+
+ // check some types
+ checkSelectivity(3 / 13.f, castAndCompare(INTEGER, GE, int5));
+ checkSelectivity(3 / 13.f, castAndCompare(BIGINT, GE, int5));
+ checkSelectivity(3 / 13.f, castAndCompare(FLOAT, GE, int5));
+ checkSelectivity(3 / 13.f, castAndCompare(DOUBLE, GE, int5));
+ }
+
+ @Test
+ public void testComputeRangePredicateSelectivityWithCast2() {
+ useFieldWithValues("f_numeric", VALUES2, KLL2);
+ checkSelectivity(4 / 28.f, castAndCompare(DECIMAL_3_1, GE,
literalFloat(1)));
+
+ // values from -99.94999 to 99.94999 (both inclusive)
+ checkSelectivity(7 / 28.f, castAndCompare(DECIMAL_3_1, LT,
literalFloat(100)));
+ checkSelectivity(7 / 28.f, castAndCompare(DECIMAL_3_1, LE,
literalFloat(100)));
+ checkSelectivity(0 / 28.f, castAndCompare(DECIMAL_3_1, GT,
literalFloat(100)));
+ checkSelectivity(0 / 28.f, castAndCompare(DECIMAL_3_1, GE,
literalFloat(100)));
+
+ checkSelectivity(10 / 28.f, castAndCompare(DECIMAL_4_1, LT,
literalFloat(100)));
+ checkSelectivity(20 / 28.f, castAndCompare(DECIMAL_4_1, LE,
literalFloat(100)));
+ checkSelectivity(3 / 28.f, castAndCompare(DECIMAL_4_1, GT,
literalFloat(100)));
+ checkSelectivity(13 / 28.f, castAndCompare(DECIMAL_4_1, GE,
literalFloat(100)));
+
+ checkSelectivity(2 / 28.f, castAndCompare(DECIMAL_2_1, LT,
literalFloat(100)));
+ checkSelectivity(2 / 28.f, castAndCompare(DECIMAL_2_1, LE,
literalFloat(100)));
+ checkSelectivity(0 / 28.f, castAndCompare(DECIMAL_2_1, GT,
literalFloat(100)));
+ checkSelectivity(0 / 28.f, castAndCompare(DECIMAL_2_1, GE,
literalFloat(100)));
+
+ // expected: 100_000f
+ checkSelectivity(1 / 28.f, castAndCompare(DECIMAL_7_1, GT,
literalFloat(10000)));
+
+ // expected: 10_000f, 100_000f, because CAST(1_000_000 AS DECIMAL(7,1)) =
NULL, and similar for even larger values
+ checkSelectivity(2 / 28.f, castAndCompare(DECIMAL_7_1, GE,
literalFloat(9999)));
+ checkSelectivity(2 / 28.f, castAndCompare(DECIMAL_7_1, GE,
literalFloat(10000)));
+
+ // expected: 100_000f
+ checkSelectivity(1 / 28.f, castAndCompare(DECIMAL_7_1, GT,
literalFloat(10000)));
+ checkSelectivity(1 / 28.f, castAndCompare(DECIMAL_7_1, GT,
literalFloat(10001)));
+
+ // expected 1f, 10f, 99.94998f, 99.94999f
+ checkSelectivity(4 / 28.f, castAndCompare(DECIMAL_3_1, GE,
literalFloat(1)));
+ checkSelectivity(3 / 28.f, castAndCompare(DECIMAL_3_1, GT,
literalFloat(1)));
+ // expected -99.94999f, -99.94998f, 0f, 1f
+ checkSelectivity(4 / 28.f, castAndCompare(DECIMAL_3_1, LE,
literalFloat(1)));
+ checkSelectivity(3 / 28.f, castAndCompare(DECIMAL_3_1, LT,
literalFloat(1)));
+
+ // the cast would apply a modulo operation to the values outside the range
of the cast
+ // so instead a default selectivity should be returned
+ checkSelectivity(1 / 3.f, castAndCompare(TINYINT, LT, literalFloat(100)));
+ checkSelectivity(1 / 3.f, castAndCompare(TINYINT, LT, literalFloat(100)));
+ }
+
+ @Test
+ public void testComputeRangePredicateSelectivityTimestamp() {
+ useFieldWithValues("f_timestamp", VALUES_TIME, KLL_TIME);
+
+ checkSelectivity(5 / 7.f, REX_BUILDER.makeCall(GE, currentInputRef,
literalTimestamp("2020-11-03")));
+ checkSelectivity(4 / 7.f, REX_BUILDER.makeCall(GT, currentInputRef,
literalTimestamp("2020-11-03")));
+ checkSelectivity(5 / 7.f, REX_BUILDER.makeCall(LE, currentInputRef,
literalTimestamp("2020-11-05T11:23:45Z")));
+ checkSelectivity(4 / 7.f, REX_BUILDER.makeCall(LT, currentInputRef,
literalTimestamp("2020-11-05T11:23:45Z")));
}
+
+ @Test
+ public void testComputeRangePredicateSelectivityDate() {
+ useFieldWithValues("f_date", VALUES_TIME, KLL_TIME);
+
+ checkSelectivity(5 / 7.f, REX_BUILDER.makeCall(GE, currentInputRef,
literalDate("2020-11-03")));
+ checkSelectivity(4 / 7.f, REX_BUILDER.makeCall(GT, currentInputRef,
literalDate("2020-11-03")));
+ checkSelectivity(4 / 7.f, REX_BUILDER.makeCall(LE, currentInputRef,
literalDate("2020-11-05")));
+ checkSelectivity(4 / 7.f, REX_BUILDER.makeCall(LT, currentInputRef,
literalDate("2020-11-05")));
+ }
+
+ @Test
+ public void testComputeRangePredicateSelectivityBetweenWithCast() {
+ useFieldWithValues("f_numeric", VALUES2, KLL2);
+ float total = VALUES2.length;
+
+ {
Review Comment:
Checkstyle warns about nested blocks; I'll refactor this when updating the
PR.
##########
ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/stats/FilterSelectivityEstimator.java:
##########
@@ -184,91 +188,284 @@ public Double visitCall(RexCall call) {
return selectivity;
}
+ /**
+ * If the cast can be removed, just return its operand and adjust the
boundaries if necessary.
+ *
+ * <p>
+ * In Hive, if a value cannot be represented by the cast, the result of
the cast is NULL,
+ * and therefore cannot fulfill the predicate. So the possible range of
the values
+ * is limited by the range of possible values of the type.
+ * </p>
+ *
+ * <p>
+ * Special care is taken to support the cast to DECIMAL(precision, scale):
+ * The cast to DECIMAL rounds the value the same way as {@link
RoundingMode#HALF_UP}.
+ * The boundaries are adjusted accordingly, without changing the semantics
of <code>inclusive</code>.
+ * </p>
+ *
+ * @param cast a RexCall of type {@link SqlKind#CAST}
+ * @param tableScan the table that provides the statistics
+ * @param boundaries indexes 0 and 1 are the boundaries of the range
predicate;
+ * indexes 2 and 3, if they exist, will be set to the
boundaries of the type range
+ * @param inclusive whether the respective boundary is inclusive or
exclusive.
+ * @return the operand if the cast can be removed, otherwise the cast itself
+ */
+ private RexNode removeCastIfPossible(RexCall cast, HiveTableScan tableScan,
float[] boundaries, boolean[] inclusive) {
+ RexNode op0 = cast.getOperands().getFirst();
+ if (!(op0 instanceof RexInputRef)) {
+ return cast;
+ }
+ int index = ((RexInputRef) op0).getIndex();
+ final List<ColStatistics> colStats =
tableScan.getColStat(Collections.singletonList(index));
+ if (colStats.isEmpty()) {
+ return cast;
+ }
+
+ // we need to check that the possible values of the input to the cast are
all within the type range of the cast
+ // otherwise the CAST introduces some modulo-like behavior (*)
+ ColStatistics colStat = colStats.getFirst();
+ ColStatistics.Range range = colStat.getRange();
+ if (range == null)
+ return cast;
+ if (range.minValue == null || Double.isNaN(range.minValue.doubleValue()))
+ return cast;
+ if (range.maxValue == null || Double.isNaN(range.maxValue.doubleValue()))
+ return cast;
+
+ String type = cast.getType().getSqlTypeName().getName();
+
+ double min;
+ double max;
+ switch (type.toLowerCase()) {
+ case serdeConstants.TINYINT_TYPE_NAME:
+ min = Byte.MIN_VALUE;
+ max = Byte.MAX_VALUE;
+ break;
+ case serdeConstants.SMALLINT_TYPE_NAME:
+ min = Short.MIN_VALUE;
+ max = Short.MAX_VALUE;
+ break;
+ case serdeConstants.INT_TYPE_NAME, "integer":
+ min = Integer.MIN_VALUE;
+ max = Integer.MAX_VALUE;
+ break;
+ case serdeConstants.BIGINT_TYPE_NAME, serdeConstants.TIMESTAMP_TYPE_NAME:
+ min = Long.MIN_VALUE;
+ max = Long.MAX_VALUE;
+ break;
+ case serdeConstants.FLOAT_TYPE_NAME:
+ min = -Float.MAX_VALUE;
+ max = Float.MAX_VALUE;
+ break;
+ case serdeConstants.DOUBLE_TYPE_NAME:
+ min = -Double.MAX_VALUE;
+ max = Double.MAX_VALUE;
+ break;
+ case serdeConstants.DECIMAL_TYPE_NAME:
+ min = -Double.MAX_VALUE;
+ max = Double.MAX_VALUE;
+ adjustBoundariesForDecimal(cast, boundaries, inclusive);
+ break;
+ default:
+ // unknown type, do not remove the cast
+ return cast;
+ }
+
+ // see (*)
+ if (range.minValue.doubleValue() < min)
+ return cast;
+ if (range.maxValue.doubleValue() > max)
+ return cast;
+
+ return op0;
+ }
+
+ /**
+ * Adjust the boundaries for a DECIMAL cast.
+ * <p>
+ * See {@link #removeCastIfPossible(RexCall, HiveTableScan, float[],
boolean[])}
+ * for an explanation of the parameters.
+ */
+ private static void adjustBoundariesForDecimal(RexCall cast, float[]
boundaries, boolean[] inclusive) {
+ // values outside the representable range are cast to NULL, so adapt the
boundaries
+ int precision = cast.getType().getPrecision();
+ int scale = cast.getType().getScale();
+ int digits = precision - scale;
+ // the cast does some rounding, i.e., CAST(99.9499 AS DECIMAL(3,1)) = 99.9
+ // but CAST(99.95 AS DECIMAL(3,1)) = NULL
+ float adjust = (float) (5 * Math.pow(10, -(scale + 1)));
+ // the range of values supported by the type is interval
[-typeRangeExtent, typeRangeExtent] (both inclusive)
+ // e.g., the typeRangeExtent is 99.94999 for DECIMAL(3,1)
+ float typeRangeExtent = Math.nextDown((float) (Math.pow(10, digits) -
adjust));
+
+ // the resulting value of +- adjust would be rounded up, so in some cases
we need to use Math.nextDown
+ float adjusted1 = inclusive[0] ? boundaries[0] - adjust :
Math.nextDown(boundaries[0] + adjust);
+ float adjusted2 = inclusive[1] ? Math.nextDown(boundaries[1] + adjust) :
boundaries[1] - adjust;
+
+ float lowerUniverse = inclusive[0] ? -typeRangeExtent :
Math.nextDown(-typeRangeExtent);
+ float upperUniverse = inclusive[1] ? typeRangeExtent :
Math.nextUp(typeRangeExtent);
+ boundaries[0] = Math.max(adjusted1, lowerUniverse);
+ boundaries[1] = Math.min(adjusted2, upperUniverse);
+ if (boundaries.length >= 4) {
+ boundaries[2] = lowerUniverse;
+ boundaries[3] = upperUniverse;
+ }
+ }
+
private double computeRangePredicateSelectivity(RexCall call, SqlKind op) {
- final boolean isLiteralLeft =
call.getOperands().get(0).getKind().equals(SqlKind.LITERAL);
- final boolean isLiteralRight =
call.getOperands().get(1).getKind().equals(SqlKind.LITERAL);
- final boolean isInputRefLeft =
call.getOperands().get(0).getKind().equals(SqlKind.INPUT_REF);
- final boolean isInputRefRight =
call.getOperands().get(1).getKind().equals(SqlKind.INPUT_REF);
+ double defaultSelectivity = ((double) 1 / (double) 3);
+ if (!(childRel instanceof HiveTableScan)) {
+ return defaultSelectivity;
+ }
- if (childRel instanceof HiveTableScan && isLiteralLeft != isLiteralRight
&& isInputRefLeft != isInputRefRight) {
- final HiveTableScan t = (HiveTableScan) childRel;
- final int inputRefIndex = ((RexInputRef)
call.getOperands().get(isInputRefLeft ? 0 : 1)).getIndex();
- final List<ColStatistics> colStats =
t.getColStat(Collections.singletonList(inputRefIndex));
+ // search for the literal
+ List<RexNode> operands = call.getOperands();
+ final Optional<Float> leftLiteral = extractLiteral(operands.get(0));
+ final Optional<Float> rightLiteral = extractLiteral(operands.get(1));
+ if ((leftLiteral.isPresent()) == (rightLiteral.isPresent())) {
+ return defaultSelectivity;
+ }
+ int literalOpIdx = leftLiteral.isPresent() ? 0 : 1;
+
+ // analyze the predicate
+ float value = leftLiteral.orElseGet(rightLiteral::get);
+ int boundaryIdx;
+ boolean openBound = op == SqlKind.LESS_THAN || op == SqlKind.GREATER_THAN;
+ switch (op) {
+ case LESS_THAN, LESS_THAN_OR_EQUAL:
+ boundaryIdx = literalOpIdx;
+ break;
+ case GREATER_THAN, GREATER_THAN_OR_EQUAL:
+ boundaryIdx = 1 - literalOpIdx;
+ break;
+ default:
+ return defaultSelectivity;
+ }
+ float[] boundaries = new float[] { Float.NEGATIVE_INFINITY,
Float.POSITIVE_INFINITY };
+ boolean[] inclusive = new boolean[] { true, true };
Review Comment:
I'll fix the `'{' is followed by whitespace` warnings when updating the PR.
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]