Github user cloud-fan commented on a diff in the pull request:
https://github.com/apache/spark/pull/19783#discussion_r156282033
--- Diff: sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/logical/statsEstimation/FilterEstimation.scala ---
@@ -332,8 +332,44 @@ case class FilterEstimation(plan: Filter) extends Logging {
colStatsMap.update(attr, newStats)
}
- Some(1.0 / BigDecimal(ndv))
- } else {
+ if (colStat.histogram.isEmpty) {
+ // returns 1/ndv if there is no histogram
+ Some(1.0 / BigDecimal(ndv))
+ } else {
+ // We compute filter selectivity using Histogram information.
+ val datum = EstimationUtils.toDecimal(literal.value, literal.dataType).toDouble
+ val histogram = colStat.histogram.get
+ val hgmBins = histogram.bins
+
+ // find bins where column's current min and max locate. Note that a column's [min, max]
+ // range may change due to another condition applied earlier.
+ val min = EstimationUtils.toDecimal(colStat.min.get, literal.dataType).toDouble
+ val max = EstimationUtils.toDecimal(colStat.max.get, literal.dataType).toDouble
+ val minBinId = EstimationUtils.findFirstBinForValue(min, hgmBins)
--- End diff ---
nit: `minBinIndex`
---
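
For readers without the diff context: below is a minimal, hypothetical Scala sketch of the two estimation paths this hunk distinguishes, using a simplified `Bin(lo, hi, ndv)` stand-in for Spark's `HistogramBin` and no `EstimationUtils`; it is not the actual implementation in FilterEstimation.scala. Without a histogram the equality selectivity falls back to 1/ndv; with an equi-height histogram, only the bins overlapping the column's current [min, max] are considered, since earlier predicates may already have narrowed that range.

```scala
// Hypothetical, simplified stand-in for Spark's HistogramBin (illustration only).
case class Bin(lo: Double, hi: Double, ndv: Long)

object EqualitySelectivitySketch {
  // No histogram: assume a uniform distribution over distinct values,
  // so P(col = literal) is estimated as 1 / ndv.
  def withoutHistogram(ndv: Long): Double = 1.0 / ndv

  // Equi-height histogram: every bin holds the same number of rows, so the
  // estimate is the share of valid bins (those overlapping the column's
  // current [min, max]) that can contain the literal, each weighted by the
  // literal's 1/ndv share of that bin's distinct values.
  def withHistogram(bins: Seq[Bin], datum: Double, min: Double, max: Double): Double = {
    val validBins = bins.filter(b => b.hi >= min && b.lo <= max)
    if (validBins.isEmpty) 0.0
    else {
      val matchingBins = validBins.filter(b => b.lo <= datum && datum <= b.hi)
      matchingBins.map(b => 1.0 / math.max(b.ndv, 1L)).sum / validBins.size
    }
  }
}

object Demo extends App {
  val bins = Seq(Bin(0, 25, 10), Bin(25, 50, 10), Bin(50, 75, 10), Bin(75, 100, 10))
  // 30.0 falls into one of four valid bins and is one of its 10 distinct values: 0.1 / 4 = 0.025
  println(EqualitySelectivitySketch.withHistogram(bins, datum = 30.0, min = 0.0, max = 100.0))
}
```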