This is an automated email from the ASF dual-hosted git repository.

dongjoon pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/spark-kubernetes-operator.git


The following commit(s) were added to refs/heads/main by this push:
     new e161205  [SPARK-49657] Add multi instances e2e
e161205 is described below

commit e161205c781338114ddb86acafa2d0b2e19e05af
Author: Qi Tan <[email protected]>
AuthorDate: Mon Sep 16 21:27:01 2024 -0700

    [SPARK-49657] Add multi instances e2e
    
    ### What changes were proposed in this pull request?
    Add an e2e test for two instances of the Spark Operator running at the same time.
    
    ### Why are the changes needed?
    There is a scenario where, in a single cluster, a user deploys multiple
instances of the operator and has each of them watch several namespaces. For example,
    operator A runs in the default namespace, watching namespace spark-1...
    operator B runs in the default-2 namespace, watching namespace spark-2...
    
    ### Does this PR introduce _any_ user-facing change?
    No
    
    ### How was this patch tested?
    Tested locally.
    
    ### Was this patch authored or co-authored using generative AI tooling?
    no
    
    Closes #126 from TQJADE/multi-instance.
    
    Authored-by: Qi Tan <[email protected]>
    Signed-off-by: Dongjoon Hyun <[email protected]>
---
 .../dynamic-config-values-2.yaml}                  | 43 +++++++++++--------
 tests/e2e/watched-namespaces/chainsaw-test.yaml    | 50 +++++++++++++++++++++-
 tests/e2e/watched-namespaces/spark-example.yaml    |  2 +
 3 files changed, 76 insertions(+), 19 deletions(-)

diff --git a/tests/e2e/watched-namespaces/spark-example.yaml 
b/tests/e2e/helm/dynamic-config-values-2.yaml
similarity index 54%
copy from tests/e2e/watched-namespaces/spark-example.yaml
copy to tests/e2e/helm/dynamic-config-values-2.yaml
index dba59ab..aacc0f1 100644
--- a/tests/e2e/watched-namespaces/spark-example.yaml
+++ b/tests/e2e/helm/dynamic-config-values-2.yaml
@@ -1,4 +1,3 @@
-#
 # Licensed to the Apache Software Foundation (ASF) under one or more
 # contributor license agreements.  See the NOTICE file distributed with
 # this work for additional information regarding copyright ownership.
@@ -6,27 +5,35 @@
 # (the "License"); you may not use this file except in compliance with
 # the License.  You may obtain a copy of the License at
 #
-#    http://www.apache.org/licenses/LICENSE-2.0
+#     http://www.apache.org/licenses/LICENSE-2.0
 #
 # Unless required by applicable law or agreed to in writing, software
 # distributed under the License is distributed on an "AS IS" BASIS,
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-#
 
-apiVersion: spark.apache.org/v1alpha1
-kind: SparkApplication
-metadata:
-  name: spark-job-succeeded-test
-  namespace: ($SPARK_APP_NAMESPACE)
-spec:
-  mainClass: "org.apache.spark.examples.SparkPi"
-  jars: 
"local:///opt/spark/examples/jars/spark-examples_2.13-4.0.0-preview1.jar"
-  sparkConf:
-    spark.executor.instances: "1"
-    spark.kubernetes.container.image: 
"spark:4.0.0-preview1-scala2.13-java17-ubuntu"
-    spark.kubernetes.authenticate.driver.serviceAccountName: "spark"
-  runtimeVersions:
-    sparkVersion: 4.0.0-preview1
-    scalaVersion: "2.13"
\ No newline at end of file
+workloadResources:
+  namespaces:
+    overrideWatchedNamespaces: false
+    data:
+      - "spark-3"
+  role:
+    create: true
+  roleBinding:
+    create: true
+  clusterRole:
+    name: spark-workload-clusterrole-2
+
+operatorConfiguration:
+  dynamicConfig:
+    enable: true
+    create: true
+    data:
+      spark.kubernetes.operator.watchedNamespaces: "spark-3"
+
+operatorRbac:
+  clusterRole:
+    name: "spark-operator-clusterrole-2"
+  clusterRoleBinding:
+    name: "spark-operator-clusterrolebinding-2"
\ No newline at end of file
diff --git a/tests/e2e/watched-namespaces/chainsaw-test.yaml 
b/tests/e2e/watched-namespaces/chainsaw-test.yaml
index 82ed409..fdffa0a 100644
--- a/tests/e2e/watched-namespaces/chainsaw-test.yaml
+++ b/tests/e2e/watched-namespaces/chainsaw-test.yaml
@@ -71,4 +71,52 @@ spec:
           content: |
             kubectl delete sparkapplication spark-job-succeeded-test -n 
spark-1 --ignore-not-found=true
             kubectl delete sparkapplication spark-job-succeeded-test -n 
spark-2 --ignore-not-found=true
-            kubectl replace -f spark-operator-dynamic-config-2.yaml
\ No newline at end of file
+            kubectl replace -f spark-operator-dynamic-config-2.yaml
+  - try:
+      - script:
+          content: |
+            echo "Installing another spark operator in default-2 namespaces, 
watching on namespace: spark-3"
+            helm install spark-kubernetes-operator -n default-2 
--create-namespace -f \
+            ../../../build-tools/helm/spark-kubernetes-operator/values.yaml -f 
\
+            ../helm/dynamic-config-values-2.yaml \
+            ../../../build-tools/helm/spark-kubernetes-operator/
+      - apply:
+          bindings:
+            - name: SPARK_APP_NAMESPACE
+              value: spark-3
+          file: spark-example.yaml
+      - sleep:
+          duration: 60s
+      - script:
+          content:
+            kubectl logs -n default-2 $(kubectl get pods -n default-2 -o=name 
-l 
app.kubernetes.io/component=operator-deployment,app.kubernetes.io/name=spark-kubernetes-operator)
+          check:
+            (contains($stdout, 'Updating operator namespaces to [spark-3]')): 
true
+      - assert:
+          bindings:
+            - name: SPARK_APP_NAMESPACE
+              value: spark-3
+          timeout: 180s
+          file: "../assertions/spark-application/spark-state-transition.yaml"
+    catch:
+      - podLogs:
+          namespace: default-2
+          selector: 
app.kubernetes.io/component=operator-deployment,app.kubernetes.io/name=spark-kubernetes-operator
+      - describe:
+          apiVersion: spark.apache.org/v1alpha1
+          kind: SparkApplication
+          namespace: spark-3
+      - get:
+          apiVersion: v1
+          kind: Pod
+          namespace: spark-3
+      - events:
+          namespace: spark-3
+    finally:
+      - script:
+          timeout: 60s
+          content: |
+            kubectl delete sparkapplication spark-job-succeeded-test -n 
spark-3 --ignore-not-found=true
+            helm uninstall spark-kubernetes-operator -n default-2
+            kubectl delete namespace default-2 --ignore-not-found=true
+            kubectl delete namespace spark-3 --ignore-not-found=true
\ No newline at end of file
diff --git a/tests/e2e/watched-namespaces/spark-example.yaml 
b/tests/e2e/watched-namespaces/spark-example.yaml
index dba59ab..031ea08 100644
--- a/tests/e2e/watched-namespaces/spark-example.yaml
+++ b/tests/e2e/watched-namespaces/spark-example.yaml
@@ -27,6 +27,8 @@ spec:
     spark.executor.instances: "1"
     spark.kubernetes.container.image: 
"spark:4.0.0-preview1-scala2.13-java17-ubuntu"
     spark.kubernetes.authenticate.driver.serviceAccountName: "spark"
+    spark.kubernetes.driver.request.cores: "0.5"
+    spark.kubernetes.executor.request.cores: "0.5"
   runtimeVersions:
     sparkVersion: 4.0.0-preview1
     scalaVersion: "2.13"
\ No newline at end of file


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to