Repository: spark
Updated Branches:
  refs/heads/branch-2.0 bec077069 -> 0cfc0469b


Revert "[SPARK-17480][SQL][FOLLOWUP] Fix more instances which calls 
List.length/size which is O(n)"

This reverts commit a3bba372abce926351335d0a2936b70988f19b23.
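
Background for the revert, as a minimal illustrative sketch (not code from this commit, and simplified from the hunks below): on scala.List, .length and .size traverse the list, so calling them in a while-loop condition costs O(n) per check. The reverted followup hoisted that call out of the loop; this revert restores the inline form. The method names below are hypothetical, for illustration only.

object ListSizeSketch extends App {
  // Inline form (what this revert restores): xs.size is re-evaluated on every check,
  // which re-walks the whole List each time when xs is a scala.List.
  def sumInline(xs: List[Int]): Int = {
    var total = 0
    var i = 0
    while (i < xs.size) {
      total += xs(i)   // note: xs(i) is also O(i) on List
      i += 1
    }
    total
  }

  // Hoisted form (what the reverted followup used): size is computed once.
  def sumHoisted(xs: List[Int]): Int = {
    var total = 0
    var i = 0
    val size = xs.size
    while (i < size) {
      total += xs(i)
      i += 1
    }
    total
  }

  assert(sumInline(List(1, 2, 3)) == sumHoisted(List(1, 2, 3)))
}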


Project: http://git-wip-us.apache.org/repos/asf/spark/repo
Commit: http://git-wip-us.apache.org/repos/asf/spark/commit/0cfc0469
Tree: http://git-wip-us.apache.org/repos/asf/spark/tree/0cfc0469
Diff: http://git-wip-us.apache.org/repos/asf/spark/diff/0cfc0469

Branch: refs/heads/branch-2.0
Commit: 0cfc0469b40450aee5d909641b4296b3a978b2d6
Parents: bec0770
Author: Tathagata Das <tathagata.das1...@gmail.com>
Authored: Sat Sep 17 14:18:40 2016 -0700
Committer: Tathagata Das <tathagata.das1...@gmail.com>
Committed: Sat Sep 17 14:18:40 2016 -0700

----------------------------------------------------------------------
 .../spark/sql/catalyst/analysis/Analyzer.scala  | 28 +++++++++++++-------
 .../expressions/conditionalExpressions.scala    |  3 +--
 .../sql/catalyst/expressions/ordering.scala     |  3 +--
 .../sql/catalyst/util/QuantileSummaries.scala   |  0
 .../apache/spark/sql/hive/HiveInspectors.scala  |  6 ++---
 .../org/apache/spark/sql/hive/TableReader.scala |  3 +--
 .../org/apache/spark/sql/hive/hiveUDFs.scala    |  3 +--
 .../spark/sql/hive/orc/OrcFileFormat.scala      |  6 ++---
 8 files changed, 27 insertions(+), 25 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/spark/blob/0cfc0469/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/Analyzer.scala
----------------------------------------------------------------------
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/Analyzer.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/Analyzer.scala
index 3e4c769..14e995e 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/Analyzer.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/Analyzer.scala
@@ -1649,17 +1649,27 @@ class Analyzer(
         }
       }.toSeq
 
-      // Third, we aggregate them by adding each Window operator for each Window Spec and then
-      // setting this to the child of the next Window operator.
-      val windowOps =
-        groupedWindowExpressions.foldLeft(child) {
-          case (last, ((partitionSpec, orderSpec), windowExpressions)) =>
-            Window(windowExpressions, partitionSpec, orderSpec, last)
-        }
+      // Third, for every Window Spec, we add a Window operator and set currentChild as the
+      // child of it.
+      var currentChild = child
+      var i = 0
+      while (i < groupedWindowExpressions.size) {
+        val ((partitionSpec, orderSpec), windowExpressions) = groupedWindowExpressions(i)
+        // Set currentChild to the newly created Window operator.
+        currentChild =
+          Window(
+            windowExpressions,
+            partitionSpec,
+            orderSpec,
+            currentChild)
+
+        // Move to next Window Spec.
+        i += 1
+      }
 
-      // Finally, we create a Project to output windowOps's output
+      // Finally, we create a Project to output currentChild's output
       // newExpressionsWithWindowFunctions.
-      Project(windowOps.output ++ newExpressionsWithWindowFunctions, windowOps)
+      Project(currentChild.output ++ newExpressionsWithWindowFunctions, currentChild)
     } // end of addWindow
 
     // We have to use transformDown at here to make sure the rule of
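
Aside on the Analyzer.scala hunk above, as a minimal sketch with hypothetical stand-in types (not the Catalyst API): the foldLeft form removed here and the while loop restored by the revert build the same chain, each new Window operator wrapping the previous one as its child.

object WindowChainSketch extends App {
  // Hypothetical stand-in for Window(...)/LogicalPlan, for illustration only.
  case class Op(spec: String, child: Option[Op])

  val base = Op("child", None)
  val specs = Seq("spec1", "spec2", "spec3")

  // foldLeft form (removed by this revert): each step wraps the previous result.
  val folded = specs.foldLeft(base)((last, s) => Op(s, Some(last)))

  // while-loop form (restored by this revert): builds the identical chain.
  var current = base
  var i = 0
  while (i < specs.size) {
    current = Op(specs(i), Some(current))
    i += 1
  }

  assert(folded == current)
}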

http://git-wip-us.apache.org/repos/asf/spark/blob/0cfc0469/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/conditionalExpressions.scala
----------------------------------------------------------------------
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/conditionalExpressions.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/conditionalExpressions.scala
index f9499cf..5f2585f 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/conditionalExpressions.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/conditionalExpressions.scala
@@ -126,8 +126,7 @@ abstract class CaseWhenBase(
 
   override def eval(input: InternalRow): Any = {
     var i = 0
-    val size = branches.size
-    while (i < size) {
+    while (i < branches.size) {
       if (java.lang.Boolean.TRUE.equals(branches(i)._1.eval(input))) {
         return branches(i)._2.eval(input)
       }

http://git-wip-us.apache.org/repos/asf/spark/blob/0cfc0469/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/ordering.scala
----------------------------------------------------------------------
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/ordering.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/ordering.scala
index 9a89290..6112259 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/ordering.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/ordering.scala
@@ -31,8 +31,7 @@ class InterpretedOrdering(ordering: Seq[SortOrder]) extends Ordering[InternalRow
 
   def compare(a: InternalRow, b: InternalRow): Int = {
     var i = 0
-    val size = ordering.size
-    while (i < size) {
+    while (i < ordering.size) {
       val order = ordering(i)
       val left = order.child.eval(a)
       val right = order.child.eval(b)

http://git-wip-us.apache.org/repos/asf/spark/blob/0cfc0469/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/util/QuantileSummaries.scala
----------------------------------------------------------------------
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/util/QuantileSummaries.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/util/QuantileSummaries.scala
deleted file mode 100644
index e69de29..0000000

http://git-wip-us.apache.org/repos/asf/spark/blob/0cfc0469/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveInspectors.scala
----------------------------------------------------------------------
diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveInspectors.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveInspectors.scala
index 9d56aec..f5c3536 100644
--- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveInspectors.scala
+++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveInspectors.scala
@@ -557,8 +557,7 @@ private[hive] trait HiveInspectors {
       // 1. create the pojo (most likely) object
       val result = x.create()
       var i = 0
-      val size = fieldRefs.size
-      while (i < size) {
+      while (i < fieldRefs.size) {
         // 2. set the property for the pojo
         val tpe = structType(i).dataType
         x.setStructFieldData(
@@ -575,8 +574,7 @@ private[hive] trait HiveInspectors {
       val row = a.asInstanceOf[InternalRow]
       val result = new java.util.ArrayList[AnyRef](fieldRefs.size)
       var i = 0
-      val size = fieldRefs.size
-      while (i < size) {
+      while (i < fieldRefs.size) {
         val tpe = structType(i).dataType
         result.add(wrap(row.get(i, tpe), fieldRefs.get(i).getFieldObjectInspector, tpe))
         i += 1

http://git-wip-us.apache.org/repos/asf/spark/blob/0cfc0469/sql/hive/src/main/scala/org/apache/spark/sql/hive/TableReader.scala
----------------------------------------------------------------------
diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/TableReader.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/TableReader.scala
index a768b9d..e4cb33b 100644
--- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/TableReader.scala
+++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/TableReader.scala
@@ -426,8 +426,7 @@ private[hive] object HadoopTableReader extends HiveInspectors with Logging {
     iterator.map { value =>
       val raw = converter.convert(rawDeser.deserialize(value))
       var i = 0
-      val length = fieldRefs.length
-      while (i < length) {
+      while (i < fieldRefs.length) {
         val fieldValue = soi.getStructFieldData(raw, fieldRefs(i))
         if (fieldValue == null) {
           mutableRow.setNullAt(fieldOrdinals(i))

http://git-wip-us.apache.org/repos/asf/spark/blob/0cfc0469/sql/hive/src/main/scala/org/apache/spark/sql/hive/hiveUDFs.scala
----------------------------------------------------------------------
diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/hiveUDFs.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/hiveUDFs.scala
index a5f800d..c536756 100644
--- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/hiveUDFs.scala
+++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/hiveUDFs.scala
@@ -150,8 +150,7 @@ private[hive] case class HiveGenericUDF(
     returnInspector // Make sure initialized.
 
     var i = 0
-    val length = children.length
-    while (i < length) {
+    while (i < children.length) {
       val idx = i
       deferredObjects(i).asInstanceOf[DeferredObjectAdapter]
         .set(() => children(idx).eval(input))

http://git-wip-us.apache.org/repos/asf/spark/blob/0cfc0469/sql/hive/src/main/scala/org/apache/spark/sql/hive/orc/OrcFileFormat.scala
----------------------------------------------------------------------
diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/orc/OrcFileFormat.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/orc/OrcFileFormat.scala
index d15fb84..9843f07 100644
--- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/orc/OrcFileFormat.scala
+++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/orc/OrcFileFormat.scala
@@ -194,8 +194,7 @@ private[orc] class OrcSerializer(dataSchema: StructType, conf: Configuration)
       row: InternalRow): Unit = {
     val fieldRefs = oi.getAllStructFieldRefs
     var i = 0
-    val size = fieldRefs.size
-    while (i < size) {
+    while (i < fieldRefs.size) {
 
       oi.setStructFieldData(
         struct,
@@ -359,8 +358,7 @@ private[orc] object OrcRelation extends HiveInspectors {
       iterator.map { value =>
         val raw = deserializer.deserialize(value)
         var i = 0
-        val length = fieldRefs.length
-        while (i < length) {
+        while (i < fieldRefs.length) {
           val fieldValue = oi.getStructFieldData(raw, fieldRefs(i))
           if (fieldValue == null) {
             mutableRow.setNullAt(fieldOrdinals(i))

