GitHub user brad-kaiser commented on a diff in the pull request:
https://github.com/apache/spark/pull/19041#discussion_r156744499
--- Diff: core/src/test/scala/org/apache/spark/CacheRecoveryManagerSuite.scala ---
@@ -0,0 +1,175 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.spark
+
+import java.util.concurrent.ConcurrentHashMap
+import java.util.concurrent.atomic.AtomicInteger
+
+import scala.concurrent.{Future, Promise}
+import scala.concurrent.ExecutionContext.Implicits.global
+import scala.reflect.ClassTag
+
+import org.mockito.Mockito._
+import org.scalatest.Matchers
+import org.scalatest.mock.MockitoSugar
+
+import org.apache.spark.rpc._
+import org.apache.spark.storage.{BlockId, BlockManagerId, RDDBlockId}
+import org.apache.spark.storage.BlockManagerMessages._
+
+class CacheRecoveryManagerSuite extends SparkFunSuite with MockitoSugar with Matchers {
+  val oneGB: Long = 1024L * 1024L * 1024L
+  val plentyOfMem = Map(
+    BlockManagerId("1", "host", 12, None) -> ((oneGB, oneGB)),
+    BlockManagerId("2", "host", 12, None) -> ((oneGB, oneGB)),
+    BlockManagerId("3", "host", 12, None) -> ((oneGB, oneGB)))
+
+ test("GracefulShutdown will take blocks until empty and then kill
executor") {
+ val conf = new SparkConf()
+ val eam = mock[ExecutorAllocationManager]
+ val blocks = Seq(RDDBlockId(1, 1), RDDBlockId(2, 1))
+ val bmme = FakeBMM(1, blocks.iterator, plentyOfMem)
+ val bmmeRef = DummyRef(bmme)
+ val cacheRecoveryManager = new CacheRecoveryManager(bmmeRef, eam, conf)
+
+ when(eam.killExecutors(Seq("1"))).thenReturn(Seq("1"))
+
+ cacheRecoveryManager.startCacheRecovery(Seq("1"))
+ Thread.sleep(1000)
+ verify(eam).killExecutors(Seq("1"))
+
+
+ bmme.replicated.get("1").get shouldBe 2
+ }
+
+ test("GracefulShutdown will kill executor if it takes too long to
replicate") {
+ val conf = new
SparkConf().set("spark.dynamicAllocation.cacheRecovery.timeout", "1s")
+ val eam = mock[ExecutorAllocationManager]
+ val blocks = Set(RDDBlockId(1, 1), RDDBlockId(2, 1), RDDBlockId(3, 1),
RDDBlockId(4, 1))
+ val bmme = FakeBMM(600, blocks.iterator, plentyOfMem)
+ val bmmeRef = DummyRef(bmme)
+ val cacheRecoveryManager = new CacheRecoveryManager(bmmeRef, eam, conf)
+
+ cacheRecoveryManager.startCacheRecovery(Seq("1"))
+ Thread.sleep(1010)
+ verify(eam, times(1)).killExecutors(Seq("1"))
+ bmme.replicated.get("1").get shouldBe 1
+ }
+
+ test("shutdown timer will get cancelled if replication finishes") {
+ val conf = new
SparkConf().set("spark.dynamicAllocation.cacheRecovery.timeout", "1s")
+ val eam = mock[ExecutorAllocationManager]
+ val blocks = Set(RDDBlockId(1, 1))
+ val bmme = FakeBMM(1, blocks.iterator, plentyOfMem)
+ val bmmeRef = DummyRef(bmme)
+ val cacheRecoveryManager = new CacheRecoveryManager(bmmeRef, eam, conf)
+
+ cacheRecoveryManager.startCacheRecovery(Seq("1"))
+ Thread.sleep(1100)
+ // should be killed once not twice
+ verify(eam, times(1)).killExecutors(Seq("1"))
+ }
+
+  test("Blocks won't replicate if we are running out of space") {
+    val conf = new SparkConf()
+    val eam = mock[ExecutorAllocationManager]
+    val blocks = Seq(RDDBlockId(1, 1), RDDBlockId(1, 1), RDDBlockId(1, 1), RDDBlockId(1, 1))
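+    // each entry maps a block manager to (max memory, remaining memory) in bytes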
+    val memStatus = Map(
+      BlockManagerId("1", "host", 12, None) -> ((2L, 1L)),
+      BlockManagerId("2", "host", 12, None) -> ((3L, 1L)),
+      BlockManagerId("3", "host", 12, None) -> ((4L, 1L)),
+      BlockManagerId("4", "host", 12, None) -> ((4L, 4L)))
+    val bmme = FakeBMM(1, blocks.iterator, memStatus)
+    val bmmeRef = DummyRef(bmme)
+    val cacheRecoveryManager = new CacheRecoveryManager(bmmeRef, eam, conf)
+
+    cacheRecoveryManager.startCacheRecovery(Seq("1", "2", "3"))
+    Thread.sleep(100)
+    bmme.replicated.size shouldBe 2
+  }
+
+ test("Blocks won't replicate if we are stopping all executors") {
+ val conf = new SparkConf()
+ val eam = mock[ExecutorAllocationManager]
+ val blocks = Seq(RDDBlockId(1, 1), RDDBlockId(1, 1), RDDBlockId(1, 1),
RDDBlockId(1, 1))
+ val memStatus = Map(BlockManagerId("1", "host", 12, None) -> ((2L,
1L)),
+ BlockManagerId("2", "host", 12, None) -> ((2L,
1L)),
--- End diff --
fixed
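
A side note on the Thread.sleep calls in these tests: polling the expected
state tends to be less flaky under CI load than sleeping for a fixed
interval. A minimal sketch of the first test's assertions using ScalaTest's
Eventually trait (the two-second span is illustrative, not taken from this
patch):

    import org.scalatest.concurrent.Eventually._
    import org.scalatest.time.{Seconds, Span}

    // Retry the block until it passes or the timeout elapses, instead of
    // sleeping for a fixed interval and hoping recovery has finished.
    eventually(timeout(Span(2, Seconds))) {
      verify(eam).killExecutors(Seq("1"))
      bmme.replicated.get("1").get shouldBe 2
    }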