Repository: spark
Updated Branches:
  refs/heads/master 6b36470c6 -> 49a01c7ea


[SPARK-6423][Mesos] MemoryUtils should use memoryOverhead if it's set

- Fixed calculateTotalMemory to use spark.mesos.executor.memoryOverhead
- Added testCase

Author: Jongyoul Lee <jongy...@gmail.com>

Closes #5099 from jongyoul/SPARK-6423 and squashes the following commits:

6747fce [Jongyoul Lee] [SPARK-6423][Mesos] MemoryUtils should use 
memoryOverhead if it's set - Changed a description of 
spark.mesos.executor.memoryOverhead
475a7c8 [Jongyoul Lee] [SPARK-6423][Mesos] MemoryUtils should use 
memoryOverhead if it's set - Fit the import rules
453c5a2 [Jongyoul Lee] [SPARK-6423][Mesos] MemoryUtils should use 
memoryOverhead if it's set - Fixed calculateTotalMemory to use 
spark.mesos.executor.memoryOverhead - Added testCase


Project: http://git-wip-us.apache.org/repos/asf/spark/repo
Commit: http://git-wip-us.apache.org/repos/asf/spark/commit/49a01c7e
Tree: http://git-wip-us.apache.org/repos/asf/spark/tree/49a01c7e
Diff: http://git-wip-us.apache.org/repos/asf/spark/diff/49a01c7e

Branch: refs/heads/master
Commit: 49a01c7ea2c48feee7ab4551c4fa03fd1cdb1a32
Parents: 6b36470
Author: Jongyoul Lee <jongy...@gmail.com>
Authored: Fri Mar 20 19:14:35 2015 +0000
Committer: Sean Owen <so...@cloudera.com>
Committed: Fri Mar 20 19:14:35 2015 +0000

----------------------------------------------------------------------
 .../scheduler/cluster/mesos/MemoryUtils.scala   | 10 ++---
 .../cluster/mesos/MemoryUtilsSuite.scala        | 47 ++++++++++++++++++++
 docs/running-on-mesos.md                        |  8 ++--
 3 files changed, 53 insertions(+), 12 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/spark/blob/49a01c7e/core/src/main/scala/org/apache/spark/scheduler/cluster/mesos/MemoryUtils.scala
----------------------------------------------------------------------
diff --git 
a/core/src/main/scala/org/apache/spark/scheduler/cluster/mesos/MemoryUtils.scala
 
b/core/src/main/scala/org/apache/spark/scheduler/cluster/mesos/MemoryUtils.scala
index 705116c..aa3ec0f 100644
--- 
a/core/src/main/scala/org/apache/spark/scheduler/cluster/mesos/MemoryUtils.scala
+++ 
b/core/src/main/scala/org/apache/spark/scheduler/cluster/mesos/MemoryUtils.scala
@@ -21,15 +21,11 @@ import org.apache.spark.SparkContext
 
 private[spark] object MemoryUtils {
   // These defaults copied from YARN
-  val OVERHEAD_FRACTION = 1.10
+  val OVERHEAD_FRACTION = 0.10
   val OVERHEAD_MINIMUM = 384
 
   def calculateTotalMemory(sc: SparkContext) = {
-    math.max(
-      sc.conf.getOption("spark.mesos.executor.memoryOverhead")
-        .getOrElse(OVERHEAD_MINIMUM.toString)
-        .toInt + sc.executorMemory,
-        OVERHEAD_FRACTION * sc.executorMemory
-    )
+    sc.conf.getInt("spark.mesos.executor.memoryOverhead",
+      math.max(OVERHEAD_FRACTION * sc.executorMemory, OVERHEAD_MINIMUM).toInt) 
+ sc.executorMemory
   }
 }

http://git-wip-us.apache.org/repos/asf/spark/blob/49a01c7e/core/src/test/scala/org/apache/spark/scheduler/cluster/mesos/MemoryUtilsSuite.scala
----------------------------------------------------------------------
diff --git 
a/core/src/test/scala/org/apache/spark/scheduler/cluster/mesos/MemoryUtilsSuite.scala
 
b/core/src/test/scala/org/apache/spark/scheduler/cluster/mesos/MemoryUtilsSuite.scala
new file mode 100644
index 0000000..3fa0115
--- /dev/null
+++ 
b/core/src/test/scala/org/apache/spark/scheduler/cluster/mesos/MemoryUtilsSuite.scala
@@ -0,0 +1,47 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.spark.scheduler.cluster.mesos
+
+import org.mockito.Mockito._
+import org.scalatest.FunSuite
+import org.scalatest.mock.MockitoSugar
+
+import org.apache.spark.{SparkConf, SparkContext}
+
+class MemoryUtilsSuite extends FunSuite with MockitoSugar {
+  test("MesosMemoryUtils should always override memoryOverhead when it's set") 
{
+    val sparkConf = new SparkConf
+
+    val sc = mock[SparkContext]
+    when(sc.conf).thenReturn(sparkConf)
+    
+    // 384 > sc.executorMemory * 0.1 => 512 + 384 = 896
+    when(sc.executorMemory).thenReturn(512)
+    assert(MemoryUtils.calculateTotalMemory(sc) === 896)
+    
+    // 384 < sc.executorMemory * 0.1 => 4096 + (4096 * 0.1) = 4505.6
+    when(sc.executorMemory).thenReturn(4096)
+    assert(MemoryUtils.calculateTotalMemory(sc) === 4505)
+
+    // set memoryOverhead
+    sparkConf.set("spark.mesos.executor.memoryOverhead", "100")
+    assert(MemoryUtils.calculateTotalMemory(sc) === 4196)
+    sparkConf.set("spark.mesos.executor.memoryOverhead", "400")
+    assert(MemoryUtils.calculateTotalMemory(sc) === 4496)
+  }
+}

http://git-wip-us.apache.org/repos/asf/spark/blob/49a01c7e/docs/running-on-mesos.md
----------------------------------------------------------------------
diff --git a/docs/running-on-mesos.md b/docs/running-on-mesos.md
index 6a9d304..c984639 100644
--- a/docs/running-on-mesos.md
+++ b/docs/running-on-mesos.md
@@ -224,11 +224,9 @@ See the [configuration page](configuration.html) for 
information on Spark config
   <td><code>spark.mesos.executor.memoryOverhead</code></td>
   <td>executor memory * 0.10, with minimum of 384</td>
   <td>
-    This value is an additive for <code>spark.executor.memory</code>, 
specified in MB,
-    which is used to calculate the total Mesos task memory. A value of 
<code>384</code>
-    implies a 384MB overhead. Additionally, there is a hard-coded 10% minimum
-    overhead. The final overhead will be the larger of either
-    `spark.mesos.executor.memoryOverhead` or 10% of `spark.executor.memory`.
+    The amount of additional memory, specified in MB, to be allocated per 
executor. By default,
+    the overhead will be the larger of either 384 or 10% of 
`spark.executor.memory`. If it is set,
+    the final overhead will be this value.
   </td>
 </tr>
 </table>


---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscr...@spark.apache.org
For additional commands, e-mail: commits-h...@spark.apache.org

Reply via email to