Merge pull request #59 from rxin/warning

Bump up logging level to warning for failed tasks.

(cherry picked from commit 3249e0e90dd9a7b422f561c42407b6a2b3feab17)
Signed-off-by: Reynold Xin <r...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/incubator-spark/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-spark/commit/27600558
Tree: http://git-wip-us.apache.org/repos/asf/incubator-spark/tree/27600558
Diff: http://git-wip-us.apache.org/repos/asf/incubator-spark/diff/27600558

Branch: refs/heads/branch-0.8
Commit: 2760055864da508a95565e01b9f540becc0694b7
Parents: 1e67234
Author: Matei Zaharia <ma...@eecs.berkeley.edu>
Authored: Tue Oct 15 14:12:33 2013 -0700
Committer: Reynold Xin <r...@apache.org>
Committed: Thu Oct 17 18:34:56 2013 -0700

----------------------------------------------------------------------
 .../spark/scheduler/cluster/ClusterTaskSetManager.scala   | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-spark/blob/27600558/core/src/main/scala/org/apache/spark/scheduler/cluster/ClusterTaskSetManager.scala
----------------------------------------------------------------------
diff --git a/core/src/main/scala/org/apache/spark/scheduler/cluster/ClusterTaskSetManager.scala b/core/src/main/scala/org/apache/spark/scheduler/cluster/ClusterTaskSetManager.scala
index e1366e0..7bd3499 100644
--- a/core/src/main/scala/org/apache/spark/scheduler/cluster/ClusterTaskSetManager.scala
+++ b/core/src/main/scala/org/apache/spark/scheduler/cluster/ClusterTaskSetManager.scala
@@ -456,13 +456,13 @@ private[spark] class ClusterTaskSetManager(
     val index = info.index
     info.markFailed()
     if (!successful(index)) {
-      logInfo("Lost TID %s (task %s:%d)".format(tid, taskSet.id, index))
+      logWarning("Lost TID %s (task %s:%d)".format(tid, taskSet.id, index))
       copiesRunning(index) -= 1
       // Check if the problem is a map output fetch failure. In that case, this
       // task will never succeed on any node, so tell the scheduler about it.
       reason.foreach {
         case fetchFailed: FetchFailed =>
-          logInfo("Loss was due to fetch failure from " + fetchFailed.bmAddress)
+          logWarning("Loss was due to fetch failure from " + fetchFailed.bmAddress)
           sched.listener.taskEnded(tasks(index), fetchFailed, null, null, info, null)
           successful(index) = true
           tasksSuccessful += 1
@@ -471,7 +471,7 @@ private[spark] class ClusterTaskSetManager(
           return
 
         case TaskKilled =>
-          logInfo("Task %d was killed.".format(tid))
+          logWarning("Task %d was killed.".format(tid))
           sched.listener.taskEnded(tasks(index), reason.get, null, null, info, null)
           return
 
@@ -496,14 +496,14 @@ private[spark] class ClusterTaskSetManager(
           }
           if (printFull) {
             val locs = ef.stackTrace.map(loc => "\tat %s".format(loc.toString))
-            logInfo("Loss was due to %s\n%s\n%s".format(
+            logWarning("Loss was due to %s\n%s\n%s".format(
               ef.className, ef.description, locs.mkString("\n")))
           } else {
             logInfo("Loss was due to %s [duplicate %d]".format(ef.description, dupCount))
           }
 
         case TaskResultLost =>
-          logInfo("Lost result for TID %s on host %s".format(tid, info.host))
+          logWarning("Lost result for TID %s on host %s".format(tid, info.host))
           sched.listener.taskEnded(tasks(index), TaskResultLost, null, null, info, null)
 
         case _ => {}

Reply via email to