Merge branch 'master' into scala-2.10

Conflicts:
        core/src/test/scala/org/apache/spark/DistributedSuite.scala
        project/SparkBuild.scala


Project: http://git-wip-us.apache.org/repos/asf/incubator-spark/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-spark/commit/c810ee06
Tree: http://git-wip-us.apache.org/repos/asf/incubator-spark/tree/c810ee06
Diff: http://git-wip-us.apache.org/repos/asf/incubator-spark/diff/c810ee06

Branch: refs/heads/scala-2.10
Commit: c810ee0690d536873b1046541cdf1b492902a60e
Parents: 5829692 232765f
Author: Prashant Sharma <prashan...@imaginea.com>
Authored: Sat Oct 5 15:52:57 2013 +0530
Committer: Prashant Sharma <prashan...@imaginea.com>
Committed: Sat Oct 5 15:52:57 2013 +0530

----------------------------------------------------------------------
 .../scala/org/apache/spark/SparkContext.scala   |   2 +-
 .../scala/org/apache/spark/TaskEndReason.scala  |   8 +-
 .../org/apache/spark/executor/Executor.scala    |  26 +--
 .../apache/spark/scheduler/DAGScheduler.scala   |   5 +-
 .../scala/org/apache/spark/scheduler/Pool.scala |   6 +-
 .../apache/spark/scheduler/Schedulable.scala    |   4 +-
 .../org/apache/spark/scheduler/TaskResult.scala |  15 +-
 .../apache/spark/scheduler/TaskSetManager.scala |   2 -
 .../scheduler/cluster/ClusterScheduler.scala    |  55 +++++--
 .../cluster/ClusterTaskSetManager.scala         | 162 +++++++++----------
 .../scheduler/cluster/TaskResultGetter.scala    | 124 ++++++++++++++
 .../spark/scheduler/local/LocalScheduler.scala  |   5 +-
 .../scheduler/local/LocalTaskSetManager.scala   |  21 ++-
 .../org/apache/spark/storage/BlockManager.scala |  27 +++-
 .../org/apache/spark/DistributedSuite.scala     |   1 +
 .../spark/scheduler/DAGSchedulerSuite.scala     |   2 -
 .../cluster/ClusterSchedulerSuite.scala         |  15 +-
 .../cluster/ClusterTaskSetManagerSuite.scala    |  58 ++++++-
 .../cluster/TaskResultGetterSuite.scala         | 113 +++++++++++++
 docs/running-on-yarn.md                         |   1 +
 ec2/README                                      |   2 +-
 make-distribution.sh                            |   2 +-
 project/SparkBuild.scala                        |   3 +
 .../org/apache/spark/deploy/yarn/Client.scala   |   2 +-
 .../spark/deploy/yarn/ClientArguments.scala     |   6 +
 25 files changed, 510 insertions(+), 157 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-spark/blob/c810ee06/core/src/main/scala/org/apache/spark/SparkContext.scala
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/incubator-spark/blob/c810ee06/core/src/main/scala/org/apache/spark/executor/Executor.scala
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/incubator-spark/blob/c810ee06/core/src/main/scala/org/apache/spark/scheduler/DAGScheduler.scala
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/incubator-spark/blob/c810ee06/core/src/main/scala/org/apache/spark/storage/BlockManager.scala
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/incubator-spark/blob/c810ee06/core/src/test/scala/org/apache/spark/DistributedSuite.scala
----------------------------------------------------------------------
diff --cc core/src/test/scala/org/apache/spark/DistributedSuite.scala
index c719a54,cd2bf9a..a31988a
--- a/core/src/test/scala/org/apache/spark/DistributedSuite.scala
+++ b/core/src/test/scala/org/apache/spark/DistributedSuite.scala
@@@ -319,19 -319,6 +319,20 @@@ class DistributedSuite extends FunSuit
        }
      }
    }
 +
 +  test("job should fail if TaskResult exceeds Akka frame size") {
 +    // We must use local-cluster mode since results are returned differently
 +    // when running under LocalScheduler:
 +    sc = new SparkContext("local-cluster[1,1,512]", "test")
 +    val akkaFrameSize =
 +      sc.env.actorSystem.settings.config.getBytes("akka.remote.netty.tcp.message-frame-size").toInt
 +    val rdd = sc.parallelize(Seq(1)).map{x => new Array[Byte](akkaFrameSize)}
 +    val exception = intercept[SparkException] {
 +      rdd.reduce((x, y) => x)
 +    }
 +    exception.getMessage should endWith("result exceeded Akka frame size")
 +  }
++
  }
  
  object DistributedSuite {

http://git-wip-us.apache.org/repos/asf/incubator-spark/blob/c810ee06/project/SparkBuild.scala
----------------------------------------------------------------------
diff --cc project/SparkBuild.scala
index 67d03f9,cdec616..19d3aa2
--- a/project/SparkBuild.scala
+++ b/project/SparkBuild.scala
@@@ -97,6 -97,12 +97,9 @@@ object SparkBuild extends Build 
     // Only allow one test at a time, even across projects, since they run in the same JVM
      concurrentRestrictions in Global += Tags.limit(Tags.Test, 1),
  
+     // also check the local Maven repository ~/.m2
+     resolvers ++= Seq(Resolver.file("Local Maven Repo", file(Path.userHome + "/.m2/repository"))),
+ 
 -    // Shared between both core and streaming.
 -    resolvers ++= Seq("Akka Repository" at "http://repo.akka.io/releases/"),
 -
      // For Sonatype publishing
     resolvers ++= Seq("sonatype-snapshots" at "https://oss.sonatype.org/content/repositories/snapshots",
       "sonatype-staging" at "https://oss.sonatype.org/service/local/staging/deploy/maven2/"),

Reply via email to