GitHub user squito commented on a diff in the pull request:

    https://github.com/apache/spark/pull/5432#discussion_r29210659
  
    --- Diff: core/src/main/scala/org/apache/spark/deploy/history/FsHistoryProvider.scala ---
    @@ -232,76 +239,105 @@ private[history] class FsHistoryProvider(conf: SparkConf) extends ApplicationHis
                     e)
                  None
              }
    -    }.toSeq.sortWith(compareAppInfo)
    -
    -    // When there are new logs, merge the new list with the existing one, maintaining
    -    // the expected ordering (descending end time). Maintaining the order is important
    -    // to avoid having to sort the list every time there is a request for the log list.
    -    if (newApps.nonEmpty) {
    -      val mergedApps = new mutable.LinkedHashMap[String, FsApplicationHistoryInfo]()
    -      def addIfAbsent(info: FsApplicationHistoryInfo): Unit = {
    -        if (!mergedApps.contains(info.id) ||
    -            mergedApps(info.id).logPath.endsWith(EventLoggingListener.IN_PROGRESS) &&
    -            !info.logPath.endsWith(EventLoggingListener.IN_PROGRESS)) {
    -          mergedApps += (info.id -> info)
    -        }
    -      }
    +    }
     
    -      val newIterator = newApps.iterator.buffered
    -      val oldIterator = applications.values.iterator.buffered
    -      while (newIterator.hasNext && oldIterator.hasNext) {
    -        if (compareAppInfo(newIterator.head, oldIterator.head)) {
    -          addIfAbsent(newIterator.next())
    -        } else {
    -          addIfAbsent(oldIterator.next())
    +    if (newAttempts.isEmpty) {
    +      return
    +    }
    +
    +    // Build a map containing all apps that contain new attempts. The app information in this map
    +    // contains both the new app attempt, and those that were already loaded in the existing apps
    +    // map. If an attempt has been updated, it replaces the old attempt in the list.
    +    val newAppMap = new mutable.HashMap[String, FsApplicationHistoryInfo]()
    +    newAttempts.foreach { attempt =>
    +      val appInfo = newAppMap.get(attempt.appId)
    +        .orElse(applications.get(attempt.appId))
    +        .map { app =>
    +          val attempts =
    +            app.attempts.filter(_.attemptId != attempt.attemptId).toList ++ List(attempt)
    +          new FsApplicationHistoryInfo(attempt.appId, attempt.name,
    +            attempts.sortWith(compareAttemptInfo))
             }
    +        .getOrElse(new FsApplicationHistoryInfo(attempt.appId, attempt.name, List(attempt)))
    +      newAppMap(attempt.appId) = appInfo
    +    }
    +
    +    // Merge the new app list with the existing one, maintaining the expected ordering (descending
    +    // end time). Maintaining the order is important to avoid having to sort the list every time
    +    // there is a request for the log list.
    +    val newApps = newAppMap.values.toSeq.sortWith(compareAppInfo)
    +    val mergedApps = new mutable.LinkedHashMap[String, FsApplicationHistoryInfo]()
    +    def addIfAbsent(info: FsApplicationHistoryInfo): Unit = {
    +      if (!mergedApps.contains(info.id)) {
    +        mergedApps += (info.id -> info)
           }
    -      newIterator.foreach(addIfAbsent)
    -      oldIterator.foreach(addIfAbsent)
    +    }
     
    -      applications = mergedApps
    +    val newIterator = newApps.iterator.buffered
    +    val oldIterator = applications.values.iterator.buffered
    +    while (newIterator.hasNext && oldIterator.hasNext) {
    +      if (newAppMap.contains(oldIterator.head.id)) {
    +        oldIterator.next()
    +      } else if (compareAppInfo(newIterator.head, oldIterator.head)) {
    +        addIfAbsent(newIterator.next())
    +      } else {
    +        addIfAbsent(oldIterator.next())
    +      }
         }
    +    newIterator.foreach(addIfAbsent)
    +    oldIterator.foreach(addIfAbsent)
    +
    +    applications = mergedApps
       }
     
       /**
        * Delete event logs from the log directory according to the clean policy defined by the user.
        */
    -  private def cleanLogs(): Unit = {
    +  private[history] def cleanLogs(): Unit = {
         try {
           val maxAge = conf.getTimeAsSeconds("spark.history.fs.cleaner.maxAge", "7d") * 1000
     
    -      val now = System.currentTimeMillis()
    +      val now = clock.getTimeMillis()
           val appsToRetain = new mutable.LinkedHashMap[String, FsApplicationHistoryInfo]()
     
    +      def shouldClean(attempt: FsApplicationAttemptInfo): Boolean = {
    +        now - attempt.lastUpdated > maxAge && attempt.completed
    +      }
    +
           // Scan all logs from the log directory.
           // Only completed applications older than the specified max age will be deleted.
    -      applications.values.foreach { info =>
    -        if (now - info.lastUpdated <= maxAge || !info.completed) {
    -          appsToRetain += (info.id -> info)
    -        } else {
    -          appsToClean += info
    +      applications.values.foreach { app =>
    +        val toClean = app.attempts.filter(shouldClean)
    +        attemptsToClean ++= toClean
    +
    +        if (toClean.isEmpty) {
    +          appsToRetain += (app.id -> app)
    +        } else if (toClean.size < app.attempts.size) {
    +          appsToRetain += (app.id ->
    +            new FsApplicationHistoryInfo(app.id, app.name,
    +              app.attempts.filter(!shouldClean(_)).toList))
    --- End diff --
    
    nit: instead of filtering twice, you can do
    ```
    val (toClean, toKeep) = app.attempts.partition(shouldClean)
    ```
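
    That splits the attempts in a single pass instead of evaluating `shouldClean` twice per attempt. As a rough sketch (reusing the names from the diff above: `applications`, `attemptsToClean`, `appsToRetain`, `shouldClean`; untested), the loop body could then become:

    ```
    applications.values.foreach { app =>
      // Partition once: expired, completed attempts go to toClean, the rest to toKeep.
      val (toClean, toKeep) = app.attempts.partition(shouldClean)
      attemptsToClean ++= toClean

      if (toClean.isEmpty) {
        // Nothing expired: keep the app unchanged.
        appsToRetain += (app.id -> app)
      } else if (toKeep.nonEmpty) {
        // Some attempts expired: rebuild the app with only the surviving attempts.
        appsToRetain += (app.id ->
          new FsApplicationHistoryInfo(app.id, app.name, toKeep.toList))
      }
    }
    ```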

