Repository: spark
Updated Branches:
  refs/heads/master 865ec32dd -> 026eb9064


[SPARK-15875] Try to use Seq.isEmpty and Seq.nonEmpty instead of Seq.length == 0 and Seq.length > 0

## What changes were proposed in this pull request?

In Scala, immutable.List.length is an expensive O(n) operation, so we should
avoid using Seq.length == 0 or Seq.length > 0 and use Seq.isEmpty and
Seq.nonEmpty instead.
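
A minimal sketch (illustrative only, not part of the patch) of the before/after pattern, using a hypothetical list `xs`:

```scala
// Sketch: why the rewrite matters for linked sequences such as immutable.List,
// where `length` must walk every element, while `isEmpty`/`nonEmpty` only
// check whether a head element exists.
object IsEmptyExample {
  def main(args: Array[String]): Unit = {
    val xs: Seq[Int] = List.fill(1000000)(0)

    // Before: traverses all one million elements just to compare with 0.
    if (xs.length > 0) println("non-empty (O(n) check)")

    // After: constant-time emptiness check.
    if (xs.nonEmpty) println("non-empty (O(1) check)")
  }
}
```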

## How was this patch tested?
Existing tests.

Author: wangyang <wangy...@haizhi.com>

Closes #13601 from yangw1234/isEmpty.


Project: http://git-wip-us.apache.org/repos/asf/spark/repo
Commit: http://git-wip-us.apache.org/repos/asf/spark/commit/026eb906
Tree: http://git-wip-us.apache.org/repos/asf/spark/tree/026eb906
Diff: http://git-wip-us.apache.org/repos/asf/spark/diff/026eb906

Branch: refs/heads/master
Commit: 026eb90644be7685971dacaabae67a293edd0133
Parents: 865ec32
Author: wangyang <wangy...@haizhi.com>
Authored: Fri Jun 10 13:10:03 2016 -0700
Committer: Reynold Xin <r...@databricks.com>
Committed: Fri Jun 10 13:10:03 2016 -0700

----------------------------------------------------------------------
 .../org/apache/spark/api/python/PythonWorkerFactory.scala      | 2 +-
 .../scala/org/apache/spark/rdd/PartitionerAwareUnionRDD.scala  | 2 +-
 .../apache/spark/util/collection/ExternalAppendOnlyMap.scala   | 6 +++---
 .../main/scala/org/apache/spark/mllib/clustering/KMeans.scala  | 2 +-
 .../org/apache/spark/sql/catalyst/analysis/Analyzer.scala      | 2 +-
 .../org/apache/spark/sql/catalyst/optimizer/Optimizer.scala    | 2 +-
 .../spark/sql/execution/aggregate/SortAggregateExec.scala      | 4 ++--
 .../org/apache/spark/sql/execution/metric/SQLMetrics.scala     | 2 +-
 .../apache/spark/streaming/dstream/TransformedDStream.scala    | 2 +-
 9 files changed, 12 insertions(+), 12 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/spark/blob/026eb906/core/src/main/scala/org/apache/spark/api/python/PythonWorkerFactory.scala
----------------------------------------------------------------------
diff --git a/core/src/main/scala/org/apache/spark/api/python/PythonWorkerFactory.scala b/core/src/main/scala/org/apache/spark/api/python/PythonWorkerFactory.scala
index 3df87f6..6a5e6f7 100644
--- a/core/src/main/scala/org/apache/spark/api/python/PythonWorkerFactory.scala
+++ b/core/src/main/scala/org/apache/spark/api/python/PythonWorkerFactory.scala
@@ -235,7 +235,7 @@ private[spark] class PythonWorkerFactory(pythonExec: String, envVars: Map[String
   }
 
   private def cleanupIdleWorkers() {
-    while (idleWorkers.length > 0) {
+    while (idleWorkers.nonEmpty) {
       val worker = idleWorkers.dequeue()
       try {
         // the worker will exit after closing the socket

http://git-wip-us.apache.org/repos/asf/spark/blob/026eb906/core/src/main/scala/org/apache/spark/rdd/PartitionerAwareUnionRDD.scala
----------------------------------------------------------------------
diff --git a/core/src/main/scala/org/apache/spark/rdd/PartitionerAwareUnionRDD.scala b/core/src/main/scala/org/apache/spark/rdd/PartitionerAwareUnionRDD.scala
index b6366f3..d744d67 100644
--- a/core/src/main/scala/org/apache/spark/rdd/PartitionerAwareUnionRDD.scala
+++ b/core/src/main/scala/org/apache/spark/rdd/PartitionerAwareUnionRDD.scala
@@ -60,7 +60,7 @@ class PartitionerAwareUnionRDD[T: ClassTag](
     sc: SparkContext,
     var rdds: Seq[RDD[T]]
   ) extends RDD[T](sc, rdds.map(x => new OneToOneDependency(x))) {
-  require(rdds.length > 0)
+  require(rdds.nonEmpty)
   require(rdds.forall(_.partitioner.isDefined))
   require(rdds.flatMap(_.partitioner).toSet.size == 1,
     "Parent RDDs have different partitioners: " + rdds.flatMap(_.partitioner))

http://git-wip-us.apache.org/repos/asf/spark/blob/026eb906/core/src/main/scala/org/apache/spark/util/collection/ExternalAppendOnlyMap.scala
----------------------------------------------------------------------
diff --git a/core/src/main/scala/org/apache/spark/util/collection/ExternalAppendOnlyMap.scala b/core/src/main/scala/org/apache/spark/util/collection/ExternalAppendOnlyMap.scala
index fc71f83..6ddc72a 100644
--- a/core/src/main/scala/org/apache/spark/util/collection/ExternalAppendOnlyMap.scala
+++ b/core/src/main/scala/org/apache/spark/util/collection/ExternalAppendOnlyMap.scala
@@ -375,14 +375,14 @@ class ExternalAppendOnlyMap[K, V, C](
     /**
      * Return true if there exists an input stream that still has unvisited pairs.
      */
-    override def hasNext: Boolean = mergeHeap.length > 0
+    override def hasNext: Boolean = mergeHeap.nonEmpty
 
     /**
      * Select a key with the minimum hash, then combine all values with the same key from all
      * input streams.
      */
     override def next(): (K, C) = {
-      if (mergeHeap.length == 0) {
+      if (mergeHeap.isEmpty) {
         throw new NoSuchElementException
       }
       // Select a key from the StreamBuffer that holds the lowest key hash
@@ -397,7 +397,7 @@ class ExternalAppendOnlyMap[K, V, C](
       // For all other streams that may have this key (i.e. have the same minimum key hash),
       // merge in the corresponding value (if any) from that stream
       val mergedBuffers = ArrayBuffer[StreamBuffer](minBuffer)
-      while (mergeHeap.length > 0 && mergeHeap.head.minKeyHash == minHash) {
+      while (mergeHeap.nonEmpty && mergeHeap.head.minKeyHash == minHash) {
         val newBuffer = mergeHeap.dequeue()
         minCombiner = mergeIfKeyExists(minKey, minCombiner, newBuffer)
         mergedBuffers += newBuffer

http://git-wip-us.apache.org/repos/asf/spark/blob/026eb906/mllib/src/main/scala/org/apache/spark/mllib/clustering/KMeans.scala
----------------------------------------------------------------------
diff --git a/mllib/src/main/scala/org/apache/spark/mllib/clustering/KMeans.scala b/mllib/src/main/scala/org/apache/spark/mllib/clustering/KMeans.scala
index 38728f2..871b1c7 100644
--- a/mllib/src/main/scala/org/apache/spark/mllib/clustering/KMeans.scala
+++ b/mllib/src/main/scala/org/apache/spark/mllib/clustering/KMeans.scala
@@ -441,7 +441,7 @@ class KMeans private (
           val rs = (0 until runs).filter { r =>
             rand.nextDouble() < 2.0 * c(r) * k / sumCosts(r)
           }
-          if (rs.length > 0) Some((p, rs)) else None
+          if (rs.nonEmpty) Some((p, rs)) else None
         }
       }.collect()
       mergeNewCenters()

http://git-wip-us.apache.org/repos/asf/spark/blob/026eb906/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/Analyzer.scala
----------------------------------------------------------------------
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/Analyzer.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/Analyzer.scala
index 58f3904..be52700 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/Analyzer.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/Analyzer.scala
@@ -1667,7 +1667,7 @@ class Analyzer(
 
        // We do a final check and see if we only have a single Window Spec defined in an
         // expressions.
-        if (distinctWindowSpec.length == 0 ) {
+        if (distinctWindowSpec.isEmpty) {
           failAnalysis(s"$expr does not have any WindowExpression.")
         } else if (distinctWindowSpec.length > 1) {
          // newExpressionsWithWindowFunctions only have expressions with a single

http://git-wip-us.apache.org/repos/asf/spark/blob/026eb906/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/optimizer/Optimizer.scala
----------------------------------------------------------------------
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/optimizer/Optimizer.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/optimizer/Optimizer.scala
index 1e10d73..e342c23 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/optimizer/Optimizer.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/optimizer/Optimizer.scala
@@ -617,7 +617,7 @@ object NullPropagation extends Rule[LogicalPlan] {
       // For Coalesce, remove null literals.
       case e @ Coalesce(children) =>
         val newChildren = children.filter(nonNullLiteral)
-        if (newChildren.length == 0) {
+        if (newChildren.isEmpty) {
           Literal.create(null, e.dataType)
         } else if (newChildren.length == 1) {
           newChildren.head

http://git-wip-us.apache.org/repos/asf/spark/blob/026eb906/sql/core/src/main/scala/org/apache/spark/sql/execution/aggregate/SortAggregateExec.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/aggregate/SortAggregateExec.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/aggregate/SortAggregateExec.scala
index 1712651..05dbacf 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/aggregate/SortAggregateExec.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/aggregate/SortAggregateExec.scala
@@ -56,8 +56,8 @@ case class SortAggregateExec(
 
   override def requiredChildDistribution: List[Distribution] = {
     requiredChildDistributionExpressions match {
-      case Some(exprs) if exprs.length == 0 => AllTuples :: Nil
-      case Some(exprs) if exprs.length > 0 => ClusteredDistribution(exprs) :: Nil
+      case Some(exprs) if exprs.isEmpty => AllTuples :: Nil
+      case Some(exprs) if exprs.nonEmpty => ClusteredDistribution(exprs) :: Nil
       case None => UnspecifiedDistribution :: Nil
     }
   }

http://git-wip-us.apache.org/repos/asf/spark/blob/026eb906/sql/core/src/main/scala/org/apache/spark/sql/execution/metric/SQLMetrics.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/metric/SQLMetrics.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/metric/SQLMetrics.scala
index e63c7c5..edfdf7c 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/metric/SQLMetrics.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/metric/SQLMetrics.scala
@@ -113,7 +113,7 @@ private[sql] object SQLMetrics {
 
       val validValues = values.filter(_ >= 0)
       val Seq(sum, min, med, max) = {
-        val metric = if (validValues.length == 0) {
+        val metric = if (validValues.isEmpty) {
           Seq.fill(4)(0L)
         } else {
           val sorted = validValues.sorted

http://git-wip-us.apache.org/repos/asf/spark/blob/026eb906/streaming/src/main/scala/org/apache/spark/streaming/dstream/TransformedDStream.scala
----------------------------------------------------------------------
diff --git a/streaming/src/main/scala/org/apache/spark/streaming/dstream/TransformedDStream.scala b/streaming/src/main/scala/org/apache/spark/streaming/dstream/TransformedDStream.scala
index 47eb9b8..0dde120 100644
--- a/streaming/src/main/scala/org/apache/spark/streaming/dstream/TransformedDStream.scala
+++ b/streaming/src/main/scala/org/apache/spark/streaming/dstream/TransformedDStream.scala
@@ -29,7 +29,7 @@ class TransformedDStream[U: ClassTag] (
     transformFunc: (Seq[RDD[_]], Time) => RDD[U]
   ) extends DStream[U](parents.head.ssc) {
 
-  require(parents.length > 0, "List of DStreams to transform is empty")
+  require(parents.nonEmpty, "List of DStreams to transform is empty")
   require(parents.map(_.ssc).distinct.size == 1, "Some of the DStreams have different contexts")
   require(parents.map(_.slideDuration).distinct.size == 1,
     "Some of the DStreams have different slide durations")

