Github user wzhfy commented on a diff in the pull request: https://github.com/apache/spark/pull/19783#discussion_r153972982 --- Diff: sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/logical/statsEstimation/FilterEstimation.scala --- @@ -332,8 +332,45 @@ case class FilterEstimation(plan: Filter) extends Logging { colStatsMap.update(attr, newStats) } - Some(1.0 / BigDecimal(ndv)) - } else { + // We compute filter selectivity using Histogram information + attr.dataType match { + case StringType | BinaryType => + Some(1.0 / BigDecimal(ndv)) + + case _ => + // returns 1/ndv if there is no histogram + if (colStat.histogram.isEmpty) return Some(1.0 / BigDecimal(ndv)) + + // We traverse histogram bins to locate the literal value + val hgmBins = colStat.histogram.get.bins + val datum = EstimationUtils.toDecimal(literal.value, literal.dataType).toDouble + // find the interval where this datum locates + var lowerId, higherId = -1 + for (i <- hgmBins.indices) { + // if datum > upperBound, just move to next bin + if (datum <= hgmBins(i).hi && lowerId < 0) lowerId = i + if (higherId < 0) { + if ((datum < hgmBins(i).hi || i == hgmBins.length - 1) || + ((datum == hgmBins(i).hi) && (datum < hgmBins(i + 1).hi))) { + higherId = i + } + } + } + assert(lowerId <= higherId) + val lowerBinNdv = hgmBins(lowerId).ndv + val higherBinNdv = hgmBins(higherId).ndv + // assume uniform distribution in each bin + val percent = if (lowerId == higherId) { + (1.0 / hgmBins.length) / math.max(lowerBinNdv, 1) + } else { + 1.0 / hgmBins.length * (higherId - lowerId - 1) + + (1.0 / hgmBins.length) / math.max(lowerBinNdv, 1) + + (1.0 / hgmBins.length) / math.max(higherBinNdv, 1) --- End diff -- bin's ndv will never be less than 1, right?
--- --------------------------------------------------------------------- To unsubscribe, e-mail: reviews-unsubscribe@spark.apache.org For additional commands, e-mail: reviews-help@spark.apache.org