Github user hvanhovell commented on a diff in the pull request:
https://github.com/apache/spark/pull/7057#discussion_r34182441
--- Diff:
sql/core/src/main/scala/org/apache/spark/sql/execution/Window.scala ---
@@ -37,443 +67,615 @@ case class Window(
child: SparkPlan)
extends UnaryNode {
- override def output: Seq[Attribute] =
- (projectList ++ windowExpression).map(_.toAttribute)
+ override def output: Seq[Attribute] = projectList ++
windowExpression.map(_.toAttribute)
- override def requiredChildDistribution: Seq[Distribution] =
+ override def requiredChildDistribution: Seq[Distribution] = {
if (windowSpec.partitionSpec.isEmpty) {
- // This operator will be very expensive.
+ // Only show warning when the number of bytes is larger than 100 MB?
+ logWarning("No Partition Defined for Window operation! Moving all
data to a single "
+ + "partition, this can cause serious performance degradation.")
AllTuples :: Nil
- } else {
- ClusteredDistribution(windowSpec.partitionSpec) :: Nil
- }
-
- // Since window functions are adding columns to the input rows, the
child's outputPartitioning
- // is preserved.
- override def outputPartitioning: Partitioning = child.outputPartitioning
-
- override def requiredChildOrdering: Seq[Seq[SortOrder]] = {
- // The required child ordering has two parts.
- // The first part is the expressions in the partition specification.
- // We add these expressions to the required ordering to make sure
input rows are grouped
- // based on the partition specification. So, we only need to process a
single partition
- // at a time.
- // The second part is the expressions specified in the ORDER BY clause.
- // Basically, we first use sort to group rows based on partition
specifications and then sort
- // Rows in a group based on the order specification.
- (windowSpec.partitionSpec.map(SortOrder(_, Ascending)) ++
windowSpec.orderSpec) :: Nil
+ } else ClusteredDistribution(windowSpec.partitionSpec) :: Nil
}
- // Since window functions basically add columns to input rows, this
operator
- // will not change the ordering of input rows.
+ override def requiredChildOrdering: Seq[Seq[SortOrder]] =
+ Seq(windowSpec.partitionSpec.map(SortOrder(_, Ascending)) ++
windowSpec.orderSpec)
+
override def outputOrdering: Seq[SortOrder] = child.outputOrdering
- case class ComputedWindow(
- unbound: WindowExpression,
- windowFunction: WindowFunction,
- resultAttribute: AttributeReference)
-
- // A list of window functions that need to be computed for each group.
- private[this] val computedWindowExpressions = windowExpression.flatMap {
window =>
- window.collect {
- case w: WindowExpression =>
- ComputedWindow(
- w,
- BindReferences.bindReference(w.windowFunction, child.output),
- AttributeReference(s"windowResult:$w", w.dataType, w.nullable)())
+ @transient
+ private[this] lazy val (factories, projection, columns) = {
+ // Helper method for creating bound ordering objects.
+ def createBoundOrdering(frameType: FrameType, offset: Int) = frameType
match {
--- End diff --
I'll look into it.
---
If your project is set up for it, you can reply to this email and have your
reply appear on GitHub as well. If your project does not have this feature
enabled and wishes so, or if the feature is enabled but not working, please
contact infrastructure at [email protected] or file a JIRA ticket
with INFRA.
---
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]