[ https://issues.apache.org/jira/browse/SPARK-43342?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=17719137#comment-17719137 ]
Dongjoon Hyun commented on SPARK-43342:
---------------------------------------

BTW, thank you for reporting, [~ofrenkel].

> Spark in Kubernetes mode throws IllegalArgumentException when using static PVC
> -------------------------------------------------------------------------------
>
>                 Key: SPARK-43342
>                 URL: https://issues.apache.org/jira/browse/SPARK-43342
>             Project: Spark
>          Issue Type: Bug
>          Components: Kubernetes
>    Affects Versions: 3.4.0
>            Reporter: Oleg Frenkel
>            Priority: Blocker
>
> When using a static PVC with Spark 3.4, the Spark Pi example fails with the error below. Previous versions of Spark worked correctly.
> {code:java}
> 23/04/26 13:22:02 INFO ExecutorPodsAllocator: Going to request 5 executors from Kubernetes for ResourceProfile Id: 0, target: 5, known: 0, sharedSlotFromPendingPods: 2147483647.
> 23/04/26 13:22:02 INFO BasicExecutorFeatureStep: Decommissioning not enabled, skipping shutdown script
> 23/04/26 13:22:02 ERROR ExecutorPodsSnapshotsStoreImpl: Going to stop due to IllegalArgumentException
> java.lang.IllegalArgumentException: PVC ClaimName: a1pvc should contain OnDemand or SPARK_EXECUTOR_ID when requiring multiple executors
>     at org.apache.spark.deploy.k8s.features.MountVolumesFeatureStep.checkPVCClaimName(MountVolumesFeatureStep.scala:135)
>     at org.apache.spark.deploy.k8s.features.MountVolumesFeatureStep.$anonfun$constructVolumes$4(MountVolumesFeatureStep.scala:75)
>     at scala.collection.TraversableLike.$anonfun$map$1(TraversableLike.scala:286)
>     at scala.collection.Iterator.foreach(Iterator.scala:943)
>     at scala.collection.Iterator.foreach$(Iterator.scala:943)
>     at scala.collection.AbstractIterator.foreach(Iterator.scala:1431)
>     at scala.collection.IterableLike.foreach(IterableLike.scala:74)
>     at scala.collection.IterableLike.foreach$(IterableLike.scala:73)
>     at scala.collection.AbstractIterable.foreach(Iterable.scala:56)
>     at scala.collection.TraversableLike.map(TraversableLike.scala:286)
>     at scala.collection.TraversableLike.map$(TraversableLike.scala:279)
>     at scala.collection.AbstractTraversable.map(Traversable.scala:108)
>     at org.apache.spark.deploy.k8s.features.MountVolumesFeatureStep.constructVolumes(MountVolumesFeatureStep.scala:58)
>     at org.apache.spark.deploy.k8s.features.MountVolumesFeatureStep.configurePod(MountVolumesFeatureStep.scala:35)
>     at org.apache.spark.scheduler.cluster.k8s.KubernetesExecutorBuilder.$anonfun$buildFromFeatures$5(KubernetesExecutorBuilder.scala:83)
>     at scala.collection.LinearSeqOptimized.foldLeft(LinearSeqOptimized.scala:126)
>     at scala.collection.LinearSeqOptimized.foldLeft$(LinearSeqOptimized.scala:122)
>     at scala.collection.immutable.List.foldLeft(List.scala:91)
>     at org.apache.spark.scheduler.cluster.k8s.KubernetesExecutorBuilder.buildFromFeatures(KubernetesExecutorBuilder.scala:82)
>     at org.apache.spark.scheduler.cluster.k8s.ExecutorPodsAllocator.$anonfun$requestNewExecutors$1(ExecutorPodsAllocator.scala:430)
>     at scala.collection.immutable.Range.foreach$mVc$sp(Range.scala:158)
>     at org.apache.spark.scheduler.cluster.k8s.ExecutorPodsAllocator.requestNewExecutors(ExecutorPodsAllocator.scala:417)
>     at org.apache.spark.scheduler.cluster.k8s.ExecutorPodsAllocator.$anonfun$onNewSnapshots$36(ExecutorPodsAllocator.scala:370)
>     at org.apache.spark.scheduler.cluster.k8s.ExecutorPodsAllocator.$anonfun$onNewSnapshots$36$adapted(ExecutorPodsAllocator.scala:363)
>     at scala.collection.mutable.ResizableArray.foreach(ResizableArray.scala:62)
>     at scala.collection.mutable.ResizableArray.foreach$(ResizableArray.scala:55)
>     at scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:49)
>     at org.apache.spark.scheduler.cluster.k8s.ExecutorPodsAllocator.onNewSnapshots(ExecutorPodsAllocator.scala:363)
>     at org.apache.spark.scheduler.cluster.k8s.ExecutorPodsAllocator.$anonfun$start$3(ExecutorPodsAllocator.scala:134)
>     at org.apache.spark.scheduler.cluster.k8s.ExecutorPodsAllocator.$anonfun$start$3$adapted(ExecutorPodsAllocator.scala:134)
>     at org.apache.spark.scheduler.cluster.k8s.ExecutorPodsSnapshotsStoreImpl$SnapshotsSubscriber.org$apache$spark$scheduler$cluster$k8s$ExecutorPodsSnapshotsStoreImpl$SnapshotsSubscriber$$processSnapshotsInternal(ExecutorPodsSnapshotsStoreImpl.scala:143)
>     at org.apache.spark.scheduler.cluster.k8s.ExecutorPodsSnapshotsStoreImpl$SnapshotsSubscriber.processSnapshots(ExecutorPodsSnapshotsStoreImpl.scala:131)
>     at org.apache.spark.scheduler.cluster.k8s.ExecutorPodsSnapshotsStoreImpl.$anonfun$addSubscriber$1(ExecutorPodsSnapshotsStoreImpl.scala:85)
>     at java.base/java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539)
>     at java.base/java.util.concurrent.FutureTask.runAndReset(FutureTask.java:305)
>     at java.base/java.util.concurrent.ScheduledThreadPoolExecutor$ScheduledFutureTask.run(ScheduledThreadPoolExecutor.java:305)
>     at java.base/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136)
>     at java.base/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
>     at java.base/java.lang.Thread.run(Thread.java:833)
> {code}
> How to reproduce:
> # Create a statically provisioned PV, for example an NFS PV: [https://kubernetes.io/docs/concepts/storage/volumes/#nfs]
> # Create a PVC that binds to the PV above.
> # Run the Spark Pi example: $SPARK_HOME/bin/spark-submit --master k8s://kubernetes.default.svc --properties-file spark.properties $SPARK_HOME/examples/src/main/python/pi.py 10
>
> spark.properties contents:
> {code:java}
> spark.executor.instances=5
> spark.kubernetes.executor.volumes.persistentVolumeClaim.nfs1.mount.path=/isilon/mnts
> spark.kubernetes.executor.volumes.persistentVolumeClaim.nfs1.mount.readOnly=false
> spark.kubernetes.executor.volumes.persistentVolumeClaim.nfs1.options.claimName=a1pvc
> spark.kubernetes.driver.volumes.persistentVolumeClaim.nfs1.options.claimName=a1pvc
> spark.kubernetes.driver.volumes.persistentVolumeClaim.nfs1.mount.readOnly=false
> spark.kubernetes.driver.volumes.persistentVolumeClaim.nfs1.mount.path=/isilon/mnts
> {code}
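For context, the exception is raised from MountVolumesFeatureStep.checkPVCClaimName (MountVolumesFeatureStep.scala:135 in the trace). The following standalone Scala sketch approximates what the check appears to enforce, reconstructed only from the error message and stack trace above; the object, constant, and parameter names are illustrative and not the actual Spark 3.4.0 source:

{code:scala}
// Sketch of the validation suggested by the stack trace: when more than one
// executor is requested, a persistentVolumeClaim claim name is rejected unless
// it contains "OnDemand" or the "SPARK_EXECUTOR_ID" placeholder.
object PvcClaimNameCheckSketch {
  private val OnDemandToken = "OnDemand"                   // illustrative name
  private val ExecutorIdPlaceholder = "SPARK_EXECUTOR_ID"  // illustrative name

  def checkPVCClaimName(claimName: String, requestedExecutors: Int): Unit = {
    if (requestedExecutors > 1 &&
        !claimName.contains(OnDemandToken) &&
        !claimName.contains(ExecutorIdPlaceholder)) {
      throw new IllegalArgumentException(
        s"PVC ClaimName: $claimName should contain OnDemand or SPARK_EXECUTOR_ID " +
          "when requiring multiple executors")
    }
  }

  def main(args: Array[String]): Unit = {
    // The reported setup uses claimName=a1pvc with spark.executor.instances=5,
    // which trips the check and reproduces the message shown above.
    try checkPVCClaimName("a1pvc", requestedExecutors = 5)
    catch { case e: IllegalArgumentException => println(e.getMessage) }
  }
}
{code}

Because the reproduction sets spark.executor.instances=5 and points every executor at the same pre-created claim a1pvc, the condition fails and executor allocation stops.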
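If per-executor volumes are acceptable as a stopgap, a claim name built for on-demand PVCs passes the check. This is only a hedged workaround sketch, not a fix for the regression reported in this ticket, and it changes semantics: each executor gets its own dynamically created PVC instead of sharing the statically provisioned a1pvc. The storage class name nfs-client and the 10Gi size below are placeholders for cluster-specific values:

{code:java}
spark.executor.instances=5
# On-demand, per-executor PVCs instead of the shared static claim.
spark.kubernetes.executor.volumes.persistentVolumeClaim.nfs1.options.claimName=OnDemand
spark.kubernetes.executor.volumes.persistentVolumeClaim.nfs1.options.storageClass=nfs-client
spark.kubernetes.executor.volumes.persistentVolumeClaim.nfs1.options.sizeLimit=10Gi
spark.kubernetes.executor.volumes.persistentVolumeClaim.nfs1.mount.path=/isilon/mnts
spark.kubernetes.executor.volumes.persistentVolumeClaim.nfs1.mount.readOnly=false
{code}

Embedding the SPARK_EXECUTOR_ID placeholder in the claim name (for example a1pvc-SPARK_EXECUTOR_ID, with one pre-created PVC per executor) should also satisfy the check; neither option restores the single shared static PVC behavior that this ticket reports as broken.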