Repository: spark
Updated Branches:
  refs/heads/master 424d8c6ff -> 8c06a5faa


[SPARK-5336][YARN] spark.executor.cores must not be less than spark.task.cpus

https://issues.apache.org/jira/browse/SPARK-5336

Author: WangTao <barneystin...@aliyun.com>
Author: WangTaoTheTonic <barneystin...@aliyun.com>

Closes #4123 from WangTaoTheTonic/SPARK-5336 and squashes the following commits:

6c9676a [WangTao] Update ClientArguments.scala
9632d3a [WangTaoTheTonic] minor comment fix
d03d6fa [WangTaoTheTonic] import ordering should be alphabetical
3112af9 [WangTao] spark.executor.cores must not be less than spark.task.cpus


Project: http://git-wip-us.apache.org/repos/asf/spark/repo
Commit: http://git-wip-us.apache.org/repos/asf/spark/commit/8c06a5fa
Tree: http://git-wip-us.apache.org/repos/asf/spark/tree/8c06a5fa
Diff: http://git-wip-us.apache.org/repos/asf/spark/diff/8c06a5fa

Branch: refs/heads/master
Commit: 8c06a5faacfc71050461273133b9cf9a9dd8986f
Parents: 424d8c6
Author: WangTao <barneystin...@aliyun.com>
Authored: Wed Jan 21 09:42:30 2015 -0600
Committer: Thomas Graves <tgra...@apache.org>
Committed: Wed Jan 21 09:42:30 2015 -0600

----------------------------------------------------------------------
 .../org/apache/spark/ExecutorAllocationManager.scala      |  2 +-
 .../org/apache/spark/scheduler/TaskSchedulerImpl.scala    |  2 +-
 .../org/apache/spark/deploy/yarn/ClientArguments.scala    | 10 +++++++---
 3 files changed, 9 insertions(+), 5 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/spark/blob/8c06a5fa/core/src/main/scala/org/apache/spark/ExecutorAllocationManager.scala
----------------------------------------------------------------------
diff --git a/core/src/main/scala/org/apache/spark/ExecutorAllocationManager.scala b/core/src/main/scala/org/apache/spark/ExecutorAllocationManager.scala
index a0ee2a7..b28da19 100644
--- a/core/src/main/scala/org/apache/spark/ExecutorAllocationManager.scala
+++ b/core/src/main/scala/org/apache/spark/ExecutorAllocationManager.scala
@@ -158,7 +158,7 @@ private[spark] class ExecutorAllocationManager(
         "shuffle service. You may enable this through spark.shuffle.service.enabled.")
     }
     if (tasksPerExecutor == 0) {
-      throw new SparkException("spark.executor.cores must not be less than spark.task.cpus.cores")
+      throw new SparkException("spark.executor.cores must not be less than spark.task.cpus.")
     }
   }
 
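For context on why this guard exists: the allocation manager computes its
tasks-per-executor figure with integer division over these two settings, so an
executor with fewer cores than a single task demands rounds down to zero task
slots and nothing can ever be scheduled. A minimal sketch of the failure mode
(the exact derivation below is an assumption based on the surrounding code,
not a verbatim excerpt):

    import org.apache.spark.{SparkConf, SparkException}

    val conf = new SparkConf()
      .set("spark.executor.cores", "1") // each executor offers one core
      .set("spark.task.cpus", "2")      // but every task demands two

    // Integer division: 1 / 2 == 0, so zero tasks fit on any executor.
    val tasksPerExecutor =
      conf.getInt("spark.executor.cores", 1) / conf.getInt("spark.task.cpus", 1)

    if (tasksPerExecutor == 0) {
      throw new SparkException(
        "spark.executor.cores must not be less than spark.task.cpus.")
    }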

http://git-wip-us.apache.org/repos/asf/spark/blob/8c06a5fa/core/src/main/scala/org/apache/spark/scheduler/TaskSchedulerImpl.scala
----------------------------------------------------------------------
diff --git a/core/src/main/scala/org/apache/spark/scheduler/TaskSchedulerImpl.scala b/core/src/main/scala/org/apache/spark/scheduler/TaskSchedulerImpl.scala
index a1dfb01..33a7aae 100644
--- a/core/src/main/scala/org/apache/spark/scheduler/TaskSchedulerImpl.scala
+++ b/core/src/main/scala/org/apache/spark/scheduler/TaskSchedulerImpl.scala
@@ -168,7 +168,7 @@ private[spark] class TaskSchedulerImpl(
             if (!hasLaunchedTask) {
              logWarning("Initial job has not accepted any resources; " +
                "check your cluster UI to ensure that workers are registered " +
-                "and have sufficient memory")
+                "and have sufficient resources")
             } else {
               this.cancel()
             }

http://git-wip-us.apache.org/repos/asf/spark/blob/8c06a5fa/yarn/src/main/scala/org/apache/spark/deploy/yarn/ClientArguments.scala
----------------------------------------------------------------------
diff --git a/yarn/src/main/scala/org/apache/spark/deploy/yarn/ClientArguments.scala b/yarn/src/main/scala/org/apache/spark/deploy/yarn/ClientArguments.scala
index 79bead7..f96b245 100644
--- a/yarn/src/main/scala/org/apache/spark/deploy/yarn/ClientArguments.scala
+++ b/yarn/src/main/scala/org/apache/spark/deploy/yarn/ClientArguments.scala
@@ -19,9 +19,9 @@ package org.apache.spark.deploy.yarn
 
 import scala.collection.mutable.ArrayBuffer
 
-import org.apache.spark.SparkConf
+import org.apache.spark.{SparkConf, SparkException}
 import org.apache.spark.deploy.yarn.YarnSparkHadoopUtil._
-import org.apache.spark.util.{Utils, IntParam, MemoryParam}
+import org.apache.spark.util.{IntParam, MemoryParam, Utils}
 
 // TODO: Add code and support for ensuring that yarn resource 'tasks' are location aware !
 private[spark] class ClientArguments(args: Array[String], sparkConf: SparkConf) {
@@ -95,6 +95,10 @@ private[spark] class ClientArguments(args: Array[String], sparkConf: SparkConf)
       throw new IllegalArgumentException(
         "You must specify at least 1 executor!\n" + getUsageMessage())
     }
+    if (executorCores < sparkConf.getInt("spark.task.cpus", 1)) {
+      throw new SparkException("Executor cores must not be less than " +
+        "spark.task.cpus.")
+    }
     if (isClusterMode) {
       for (key <- Seq(amMemKey, amMemOverheadKey, amCoresKey)) {
         if (sparkConf.contains(key)) {
@@ -222,7 +226,7 @@ private[spark] class ClientArguments(args: Array[String], sparkConf: SparkConf)
      |  --arg ARG                Argument to be passed to your application's main class.
      |                           Multiple invocations are possible, each will be passed in order.
      |  --num-executors NUM      Number of executors to start (Default: 2)
-      |  --executor-cores NUM     Number of cores for the executors (Default: 1).
+      |  --executor-cores NUM     Number of cores per executor (Default: 1).
      |  --driver-memory MEM      Memory for driver (e.g. 1000M, 2G) (Default: 512 Mb)
      |  --driver-cores NUM       Number of cores used by the driver (Default: 1).
      |  --executor-memory MEM    Memory per executor (e.g. 1000M, 2G) (Default: 1G)
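With this check in place, a misconfigured YARN submission fails fast during
argument validation instead of hanging with a job that can never be scheduled.
A hypothetical repro (ClientArguments is private[spark], so this is
illustrative only; the jar and class names are placeholders):

    import org.apache.spark.SparkConf
    import org.apache.spark.deploy.yarn.ClientArguments

    val conf = new SparkConf().set("spark.task.cpus", "4")
    val args = Array(
      "--jar", "myapp.jar",
      "--class", "org.example.MyApp",
      "--executor-cores", "2") // 2 < spark.task.cpus (4)

    // Throws SparkException: "Executor cores must not be less than spark.task.cpus."
    new ClientArguments(args, conf)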

