Updated Branches:
  refs/heads/master 678dec668 -> 3249e0e90

Bump up logging level to warning for failed tasks.


Project: http://git-wip-us.apache.org/repos/asf/incubator-spark/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-spark/commit/f41feb7b
Tree: http://git-wip-us.apache.org/repos/asf/incubator-spark/tree/f41feb7b
Diff: http://git-wip-us.apache.org/repos/asf/incubator-spark/diff/f41feb7b

Branch: refs/heads/master
Commit: f41feb7b338b5fdd60260f5ce7cba94202102194
Parents: e33b183
Author: Reynold Xin <r...@apache.org>
Authored: Mon Oct 14 23:35:32 2013 -0700
Committer: Reynold Xin <r...@apache.org>
Committed: Mon Oct 14 23:35:32 2013 -0700

----------------------------------------------------------------------
 .../spark/scheduler/cluster/ClusterTaskSetManager.scala   | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-spark/blob/f41feb7b/core/src/main/scala/org/apache/spark/scheduler/cluster/ClusterTaskSetManager.scala
----------------------------------------------------------------------
diff --git a/core/src/main/scala/org/apache/spark/scheduler/cluster/ClusterTaskSetManager.scala b/core/src/main/scala/org/apache/spark/scheduler/cluster/ClusterTaskSetManager.scala
index e1366e0..7bd3499 100644
--- a/core/src/main/scala/org/apache/spark/scheduler/cluster/ClusterTaskSetManager.scala
+++ b/core/src/main/scala/org/apache/spark/scheduler/cluster/ClusterTaskSetManager.scala
@@ -456,13 +456,13 @@ private[spark] class ClusterTaskSetManager(
     val index = info.index
     info.markFailed()
     if (!successful(index)) {
-      logInfo("Lost TID %s (task %s:%d)".format(tid, taskSet.id, index))
+      logWarning("Lost TID %s (task %s:%d)".format(tid, taskSet.id, index))
       copiesRunning(index) -= 1
       // Check if the problem is a map output fetch failure. In that case, this
       // task will never succeed on any node, so tell the scheduler about it.
       reason.foreach {
         case fetchFailed: FetchFailed =>
-          logInfo("Loss was due to fetch failure from " + fetchFailed.bmAddress)
+          logWarning("Loss was due to fetch failure from " + fetchFailed.bmAddress)
           sched.listener.taskEnded(tasks(index), fetchFailed, null, null, info, null)
           successful(index) = true
           tasksSuccessful += 1
@@ -471,7 +471,7 @@ private[spark] class ClusterTaskSetManager(
           return
 
         case TaskKilled =>
-          logInfo("Task %d was killed.".format(tid))
+          logWarning("Task %d was killed.".format(tid))
           sched.listener.taskEnded(tasks(index), reason.get, null, null, info, null)
           return
 
@@ -496,14 +496,14 @@ private[spark] class ClusterTaskSetManager(
           }
           if (printFull) {
             val locs = ef.stackTrace.map(loc => "\tat %s".format(loc.toString))
-            logInfo("Loss was due to %s\n%s\n%s".format(
+            logWarning("Loss was due to %s\n%s\n%s".format(
               ef.className, ef.description, locs.mkString("\n")))
           } else {
             logInfo("Loss was due to %s [duplicate %d]".format(ef.description, dupCount))
           }
 
         case TaskResultLost =>
-          logInfo("Lost result for TID %s on host %s".format(tid, info.host))
+          logWarning("Lost result for TID %s on host %s".format(tid, info.host))
           sched.listener.taskEnded(tasks(index), TaskResultLost, null, null, info, null)
 
         case _ => {}

Reply via email to