Github user vanzin commented on a diff in the pull request:
https://github.com/apache/spark/pull/1218#discussion_r15545745
--- Diff:
core/src/main/scala/org/apache/spark/deploy/history/FsHistoryProvider.scala ---
@@ -116,80 +139,105 @@ private[history] class FsHistoryProvider(conf:
SparkConf) extends ApplicationHis
try {
val logStatus = fs.listStatus(new Path(logDir))
val logDirs = if (logStatus != null) logStatus.filter(_.isDir).toSeq
else Seq[FileStatus]()
- val logInfos = logDirs.filter {
- dir => fs.isFile(new Path(dir.getPath(),
EventLoggingListener.APPLICATION_COMPLETE))
- }
- val currentApps = Map[String, ApplicationHistoryInfo](
- appList.map(app => (app.id -> app)):_*)
-
- // For any application that either (i) is not listed or (ii) has
changed since the last time
- // the listing was created (defined by the log dir's modification
time), load the app's info.
- // Otherwise just reuse what's already in memory.
- val newApps = new
mutable.ArrayBuffer[ApplicationHistoryInfo](logInfos.size)
- for (dir <- logInfos) {
- val curr = currentApps.getOrElse(dir.getPath().getName(), null)
- if (curr == null || curr.lastUpdated < getModificationTime(dir)) {
+ // Load all new logs from the log directory. Only directories that
have a modification time
+ // later than the last known log directory will be loaded.
+ var newMostRecentModTime = mostRecentLogModTime
+ val logInfos = logDirs
+ .filter { dir =>
+ if (fs.isFile(new Path(dir.getPath(),
EventLoggingListener.APPLICATION_COMPLETE))) {
+ val modTime = getModificationTime(dir)
+ newMostRecentModTime = math.max(newMostRecentModTime, modTime)
+ modTime > mostRecentLogModTime
+ } else {
+ false
+ }
+ }
+ .map { dir =>
try {
- newApps += loadAppInfo(dir, false)._1
+ val (replayBus, appListener) = createReplayBus(dir)
+ replayBus.replay()
+ new FsApplicationHistoryInfo(
+ dir.getPath().getName(),
+ appListener.appId.getOrElse(dir.getPath().getName()),
+ appListener.appName,
+ appListener.startTime,
+ appListener.endTime,
+ getModificationTime(dir),
+ appListener.sparkUser)
} catch {
- case e: Exception => logError(s"Failed to load app info from
directory $dir.")
+ case e: Exception =>
+ logInfo(s"Failed to load application log data from $dir.", e)
+ null
+ }
+ }
+ .sortBy { info => -info.endTime }
+
+ mostRecentLogModTime = newMostRecentModTime
+
+ if (!logInfos.isEmpty) {
+ var newAppList = new mutable.LinkedHashMap[String,
FsApplicationHistoryInfo]()
+
+ // Merge the new apps with the existing ones, discarding any
duplicates. The new map
+ // is created in descending end time order.
+ val currentApps = appList.values.iterator
+ var current = if (currentApps.hasNext) currentApps.next else null
+ def addOldInfo(oldInfo: FsApplicationHistoryInfo) = {
+ if (!newAppList.contains(oldInfo.id)) {
+ newAppList += (oldInfo.id -> oldInfo)
+ }
+ }
+
+
+ logInfos.foreach { info =>
+ if (info != null) {
+ while (current != null && current.endTime > info.endTime) {
+ addOldInfo(current)
+ current = if (currentApps.hasNext) currentApps.next else null
+ }
+
+ newAppList += (info.id -> info)
}
- } else {
- newApps += curr
}
- }
- appList = newApps.sortBy { info => -info.endTime }
+ if (current != null) {
+ addOldInfo(current)
+ }
+ currentApps.foreach { oldInfo =>
+ addOldInfo(oldInfo)
+ }
+
+ appList = newAppList
+ }
--- End diff --
This is
https://github.com/vanzin/spark/commit/abc46974b8bfbf08eb9544c4ab8d366ce3bc2011,
in case you want to look at the change in isolation.
Basically, since we now need to maintain a map of app id -> app metadata,
we need slightly different code to preserve the optimization where we only
load data for new applications from HDFS.
---
If your project is set up for it, you can reply to this email and have your
reply appear on GitHub as well. If your project does not have this feature
enabled and wishes so, or if the feature is enabled but not working, please
contact infrastructure at [email protected] or file a JIRA ticket
with INFRA.
---