Github user cloud-fan commented on a diff in the pull request:
https://github.com/apache/spark/pull/13494#discussion_r68377541
--- Diff:
sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveMetastoreCatalog.scala ---
@@ -457,6 +458,125 @@ private[hive] class
HiveMetastoreCatalog(sparkSession: SparkSession) extends Log
allowExisting)
}
}
+
+
+ /**
+  * When a query scans only partition columns, answer it from catalog metadata
+  * without scanning any files. This applies to distinct aggregations and to
+  * Max/Min aggregations over partition columns, e.g. max(partitionColumn).
+  */
+ object MetadataOnlyOptimizer extends Rule[LogicalPlan] {
+
/**
 * Returns true when every aggregate expression in `a` can be answered from
 * partition metadata alone: either the aggregate is distinct, or its
 * aggregate function is Max or Min (whose value over the distinct partition
 * values equals its value over all rows).
 */
private def canSupportMetadataOnly(a: Aggregate): Boolean = {
  // Collect each distinct AggregateExpression appearing anywhere in the
  // aggregate list (they may be nested inside arbitrary expressions).
  val aggExprs = a.aggregateExpressions
    .flatMap(_.collect { case agg: AggregateExpression => agg })
    .distinct
  aggExprs.forall { agg =>
    // Distinct aggregates only need the distinct partition values; otherwise
    // only Max/Min are metadata-answerable.
    agg.isDistinct || (agg.aggregateFunction match {
      case _: Max | _: Min => true
      case _ => false
    })
  }
}
+
/**
 * Walks down through Project/Filter/SubqueryAlias nodes looking for a
 * partitioned relation.
 *
 * @param plan the sub-plan beneath the aggregate being optimized
 * @return the relation, if one with a non-empty partition schema/keys was
 *         found, together with every filter condition collected on the way
 *         down (outermost first).
 */
private def findRelation(plan: LogicalPlan): (Option[LogicalPlan], Seq[Expression]) = {
  plan match {
    case relation @ LogicalRelation(files: HadoopFsRelation, _, _)
        if files.partitionSchema.nonEmpty =>
      (Some(relation), Nil)

    case relation: MetastoreRelation if relation.partitionKeys.nonEmpty =>
      (Some(relation), Nil)

    // Projects and subquery aliases are transparent for this search.
    case Project(_, child) =>
      findRelation(child)

    case Filter(filterCondition, child) =>
      // Keep the condition so the caller can prune partitions with it.
      val (foundRelation, conditions) = findRelation(child)
      (foundRelation, conditions :+ filterCondition)

    case SubqueryAlias(_, child) =>
      findRelation(child)

    case _ => (None, Nil)
  }
}
+
/**
 * Rewrites `parent` (an aggregate) so that its input is a LogicalRDD built
 * from partition metadata instead of a full file/table scan. The rewrite
 * only fires when every attribute referenced by `parent` and by `filters`
 * is a partition column; otherwise `parent` is returned unchanged.
 *
 * @param parent   the aggregate node being optimized
 * @param project  optional Project to re-insert above the metadata scan
 * @param filters  filter conditions collected between `parent` and `relation`
 * @param relation the partitioned relation found by `findRelation`
 */
private def convertToMetadataOnlyPlan(
    parent: LogicalPlan,
    project: Option[LogicalPlan],
    filters: Seq[Expression],
    relation: LogicalPlan): LogicalPlan = relation match {
  case l @ LogicalRelation(files: HadoopFsRelation, _, _) =>
    // Map partition schema fields back to the relation's output attributes.
    val attributeMap = l.output.map(attr => (attr.name, attr)).toMap
    val partitionColumns = files.partitionSchema.map { field =>
      attributeMap.getOrElse(field.name, throw new AnalysisException(
        s"Unable to resolve ${field.name} given [${l.output.map(_.name).mkString(", ")}]"))
    }
    val filterColumns = filters.flatMap(_.references)
    val referencedSet = parent.references ++ AttributeSet(filterColumns)
    if (referencedSet.subsetOf(AttributeSet(partitionColumns))) {
      // listFiles prunes partitions using the pushed-down filters.
      val selectedPartitions = files.location.listFiles(filters)
      val partitionValues = selectedPartitions.map(_.values)
      val valuesRdd = sparkSession.sparkContext.parallelize(partitionValues, 1)
      val valuesPlan = LogicalRDD(partitionColumns, valuesRdd)(sparkSession)
      // Re-apply the filters on top of the metadata rows, mirroring the
      // MetastoreRelation branch below: pruning done by listFiles may be a
      // superset of the exact filter semantics.
      val filterPlan =
        filters.reduceLeftOption(And).map(Filter(_, valuesPlan)).getOrElse(valuesPlan)
      val scanPlan = project.map(_.withNewChildren(filterPlan :: Nil)).getOrElse(filterPlan)
      parent.withNewChildren(scanPlan :: Nil)
    } else {
      parent
    }

  case relation: MetastoreRelation =>
    // Include filter-referenced columns in the subset check (as the
    // HadoopFsRelation branch does); otherwise a filter over a non-partition
    // column would yield an unresolvable Filter over the metadata rows.
    val filterColumns = filters.flatMap(_.references)
    val referencedSet = parent.references ++ AttributeSet(filterColumns)
    if (referencedSet.subsetOf(AttributeSet(relation.partitionKeys))) {
      val partitionColumnDataTypes = relation.partitionKeys.map(_.dataType)
      // Hive stores partition values as strings; cast each raw value to the
      // partition key's declared data type.
      val partitionValues = relation.getHiveQlPartitions(filters).map { p =>
        InternalRow.fromSeq(p.getValues.asScala.zip(partitionColumnDataTypes).map {
          case (rawValue, dataType) => Cast(Literal(rawValue), dataType).eval(null)
        })
      }
      val valuesRdd = sparkSession.sparkContext.parallelize(partitionValues, 1)
      val valuesPlan = LogicalRDD(relation.partitionKeys, valuesRdd)(sparkSession)
      // getHiveQlPartitions may prune only partially, so keep the filter.
      val filterPlan =
        filters.reduceLeftOption(And).map(Filter(_, valuesPlan)).getOrElse(valuesPlan)
      val scanPlan = project.map(_.withNewChildren(filterPlan :: Nil)).getOrElse(filterPlan)
      parent.withNewChildren(scanPlan :: Nil)
    } else {
      parent
    }

  case _ =>
    parent
}
+
+ def apply(plan: LogicalPlan): LogicalPlan = {
+ if (!sparkSession.sessionState.conf.optimizerMetadataOnly) {
+ return plan
+ }
+ plan.transform {
+ case a @ Aggregate(_, _, child) if canSupportMetadataOnly(a) =>
+ val (plan, filters) = findRelation(child)
+ if (plan.isDefined) {
+ convertToMetadataOnlyPlan(a, None, filters, plan.get)
+ } else {
+ a
+ }
+
+ case d @ Distinct(p @ Project(_, _)) =>
--- End diff --
Why do we handle `Distinct` specially? It will be rewritten into
`Aggregate`, so I think we should have a single general rule that handles
`Aggregate`.
---
If your project is set up for it, you can reply to this email and have your
reply appear on GitHub as well. If your project does not have this feature
enabled and wishes so, or if the feature is enabled but not working, please
contact infrastructure at [email protected] or file a JIRA ticket
with INFRA.
---
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]