This is an automated email from the ASF dual-hosted git repository.

dongjoon pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/spark.git


The following commit(s) were added to refs/heads/master by this push:
     new 35952cb  [SPARK-27859][SS] Use efficient sorting instead of `.sorted.reverse` sequence
35952cb is described below

commit 35952cb42c2935d10f1306cc8d1b3fabc8107079
Author: wenxuanguan <[email protected]>
AuthorDate: Mon May 27 21:53:23 2019 -0700

    [SPARK-27859][SS] Use efficient sorting instead of `.sorted.reverse` sequence
    
    ## What changes were proposed in this pull request?
    
    Use a descending sort in HDFSMetadataLog.getLatest instead of the two-step sequence of an ascending sort followed by a reverse.
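    
    A minimal sketch of the idea (the batch IDs below are hypothetical, not taken from the patch): `.sorted(Ordering.Long.reverse)` yields the same descending order as `.sorted.reverse` while avoiding the extra pass and intermediate collection created by `.reverse`.
    
    ```scala
    // Hypothetical batch IDs for illustration only.
    val batchIds: Seq[Long] = Seq(3L, 1L, 7L, 5L)
    
    val twoStep = batchIds.sorted.reverse                // ascending sort, then an extra reverse pass
    val oneStep = batchIds.sorted(Ordering.Long.reverse) // single descending sort
    
    assert(twoStep == oneStep)                           // List(7, 5, 3, 1) in both cases
    ```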
    
    ## How was this patch tested?
    
    Jenkins
    
    Closes #24711 from wenxuanguan/bug-fix-hdfsmetadatalog.
    
    Authored-by: wenxuanguan <[email protected]>
    Signed-off-by: Dongjoon Hyun <[email protected]>
---
 .../spark/sql/execution/streaming/CompactibleFileStreamLog.scala       | 3 +--
 .../org/apache/spark/sql/execution/streaming/HDFSMetadataLog.scala     | 3 +--
 streaming/src/main/scala/org/apache/spark/streaming/ui/BatchPage.scala | 2 +-
 3 files changed, 3 insertions(+), 5 deletions(-)

diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/CompactibleFileStreamLog.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/CompactibleFileStreamLog.scala
index 9e9bced..19c93c5 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/CompactibleFileStreamLog.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/CompactibleFileStreamLog.scala
@@ -85,8 +85,7 @@ abstract class CompactibleFileStreamLog[T <: AnyRef : ClassTag](
     val compactibleBatchIds = fileManager.list(metadataPath, batchFilesFilter)
       .filter(f => f.getPath.toString.endsWith(CompactibleFileStreamLog.COMPACT_FILE_SUFFIX))
       .map(f => pathToBatchId(f.getPath))
-      .sorted
-      .reverse
+      .sorted(Ordering.Long.reverse)
 
     // Case 1
     var interval = defaultCompactInterval
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/HDFSMetadataLog.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/HDFSMetadataLog.scala
index 62d524f..1224be7 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/HDFSMetadataLog.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/HDFSMetadataLog.scala
@@ -177,8 +177,7 @@ class HDFSMetadataLog[T <: AnyRef : ClassTag](sparkSession: SparkSession, path:
   override def getLatest(): Option[(Long, T)] = {
     val batchIds = fileManager.list(metadataPath, batchFilesFilter)
       .map(f => pathToBatchId(f.getPath))
-      .sorted
-      .reverse
+      .sorted(Ordering.Long.reverse)
     for (batchId <- batchIds) {
       val batch = get(batchId)
       if (batch.isDefined) {
diff --git a/streaming/src/main/scala/org/apache/spark/streaming/ui/BatchPage.scala b/streaming/src/main/scala/org/apache/spark/streaming/ui/BatchPage.scala
index dc7876b..3310f3b 100644
--- a/streaming/src/main/scala/org/apache/spark/streaming/ui/BatchPage.scala
+++ b/streaming/src/main/scala/org/apache/spark/streaming/ui/BatchPage.scala
@@ -104,7 +104,7 @@ private[ui] class BatchPage(parent: StreamingTab) extends WebUIPage("batch") {
       }
     }
     val lastFailureReason =
-      sparkJob.stageIds.sorted.reverse.flatMap(getStageData).
+      sparkJob.stageIds.sorted(Ordering.Int.reverse).flatMap(getStageData).
       dropWhile(_.failureReason == None).take(1). // get the first info that contains failure
       flatMap(info => info.failureReason).headOption.getOrElse("")
     val formattedDuration = duration.map(d => SparkUIUtils.formatDuration(d)).getOrElse("-")


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]
