This is an automated email from the ASF dual-hosted git repository.

mani pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/yunikorn-k8shim.git


The following commit(s) were added to refs/heads/master by this push:
     new f2819084 [YUNIKORN-2715] Handle special characters for params like queue, username & groupname (#867)
f2819084 is described below

commit f2819084f8720aa0eec8e1f41a886413b22d93b2
Author: Manikandan R <[email protected]>
AuthorDate: Tue Jul 16 10:52:06 2024 +0530

    [YUNIKORN-2715] Handle special characters for params like queue, username & groupname (#867)
    
    Closes: #867
    
    Signed-off-by: Manikandan R <[email protected]>
---
 test/e2e/configmap/configmap_test.go               | 109 +++++++++++++++++++++
 test/e2e/user_group_limit/user_group_limit_test.go |  75 ++++++++++++++
 2 files changed, 184 insertions(+)

diff --git a/test/e2e/configmap/configmap_test.go b/test/e2e/configmap/configmap_test.go
index 434d8f2a..15cfb095 100644
--- a/test/e2e/configmap/configmap_test.go
+++ b/test/e2e/configmap/configmap_test.go
@@ -24,9 +24,12 @@ import (
        "io"
        "time"
 
+       "github.com/onsi/gomega"
        v1 "k8s.io/api/core/v1"
+       metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 
        "github.com/apache/yunikorn-core/pkg/common/configs"
+       "github.com/apache/yunikorn-k8shim/pkg/common/constants"
        tests "github.com/apache/yunikorn-k8shim/test/e2e"
        "github.com/apache/yunikorn-k8shim/test/e2e/framework/configmanager"
        "github.com/apache/yunikorn-k8shim/test/e2e/framework/helpers/k8s"
@@ -100,6 +103,112 @@ var _ = Describe("ConfigMap", func() {
                checkSchedulerConfig(schedulerConfig)
        })
 
+       It("Configure the scheduler with a valid queue name", func() {
+               validConfig := `
+partitions:
+  - name: default
+    placementrules:
+      - name: tag
+        value: namespace
+        create: true
+    queues:
+      - name: root_Test-a_b_#_c_#_d_/_e@dom:ain
+        submitacl: '*'
+`
+               data := map[string]string{"queues.yaml": validConfig}
+               validConfigMap := &v1.ConfigMap{
+                       ObjectMeta: metav1.ObjectMeta{
+                               Name:      constants.ConfigMapName,
+                               Namespace: configmanager.YuniKornTestConfig.YkNamespace,
+                       },
+                       Data: data,
+               }
+               cm, err := kClient.UpdateConfigMap(validConfigMap, configmanager.YuniKornTestConfig.YkNamespace)
+               gomega.Ω(err).ShouldNot(gomega.HaveOccurred())
+               gomega.Ω(cm).ShouldNot(gomega.BeNil())
+       })
+
+       It("Configure the scheduler with an invalid queue name", func() {
+               invalidConfig := `
+partitions:
+  - name: default
+    placementrules:
+      - name: tag
+        value: namespace
+        create: true
+    queues:
+      - name: ro!ot
+        submitacl: '*'
+`
+               invalidConfigData := map[string]string{"queues.yaml": invalidConfig}
+               invalidConfigMap := &v1.ConfigMap{
+                       ObjectMeta: metav1.ObjectMeta{
+                               Name:      constants.ConfigMapName,
+                               Namespace: configmanager.YuniKornTestConfig.YkNamespace,
+                       },
+                       Data: invalidConfigData,
+               }
+               _, invalidConfigErr := kClient.UpdateConfigMap(invalidConfigMap, configmanager.YuniKornTestConfig.YkNamespace)
+               gomega.Ω(invalidConfigErr).Should(gomega.HaveOccurred())
+       })
+
+       It("Configure the scheduler with a valid user name in placement rule filter", func() {
+               validConfig := `
+partitions:
+  - name: default
+    placementrules:
+      - name: fixed
+        value: root_Test-a_b_#_c_#_d_/_e@dom:ain
+        create: true
+        filter:
+          type: allow
+          users:
+            - user_Test-a_b_#_c_#_d_/_e@dom:ain.com
+    queues:
+      - name: root
+        submitacl: '*'
+`
+               data := map[string]string{"queues.yaml": validConfig}
+               validConfigMap := &v1.ConfigMap{
+                       ObjectMeta: metav1.ObjectMeta{
+                               Name:      constants.ConfigMapName,
+                               Namespace: configmanager.YuniKornTestConfig.YkNamespace,
+                       },
+                       Data: data,
+               }
+               cm, err := kClient.UpdateConfigMap(validConfigMap, configmanager.YuniKornTestConfig.YkNamespace)
+               gomega.Ω(err).ShouldNot(gomega.HaveOccurred())
+               gomega.Ω(cm).ShouldNot(gomega.BeNil())
+       })
+
+       It("Configure the scheduler with an invalid user name in placement rule filter", func() {
+               invalidConfig := `
+partitions:
+  - name: default
+    placementrules:
+      - name: fixed
+        value: root_Test-a_b_#_c_#_d_/_e@dom:ain
+        create: true
+        filter:
+          type: allow
+          users:
+            - user_inva!lid
+    queues:
+      - name: root
+        submitacl: '*'
+`
+               invalidConfigData := map[string]string{"queues.yaml": invalidConfig}
+               invalidConfigMap := &v1.ConfigMap{
+                       ObjectMeta: metav1.ObjectMeta{
+                               Name:      constants.ConfigMapName,
+                               Namespace: configmanager.YuniKornTestConfig.YkNamespace,
+                       },
+                       Data: invalidConfigData,
+               }
+               _, invalidConfigErr := kClient.UpdateConfigMap(invalidConfigMap, configmanager.YuniKornTestConfig.YkNamespace)
+               gomega.Ω(invalidConfigErr).Should(gomega.HaveOccurred())
+       })
+
        AfterEach(func() {
                tests.DumpClusterInfoIfSpecFailed(suiteName, []string{"default"})
                yunikorn.RestoreConfigMapWrapper(oldConfigMap)
diff --git a/test/e2e/user_group_limit/user_group_limit_test.go b/test/e2e/user_group_limit/user_group_limit_test.go
index 46aaeb80..e4d74413 100644
--- a/test/e2e/user_group_limit/user_group_limit_test.go
+++ b/test/e2e/user_group_limit/user_group_limit_test.go
@@ -21,6 +21,7 @@ package user_group_limit_test
 import (
        "encoding/json"
        "fmt"
+       "net/url"
        "runtime"
        "time"
 
@@ -566,6 +567,80 @@ var _ = ginkgo.Describe("UserGroupLimit", func() {
                checkUsage(userTestType, user2, sandboxQueue1, []*v1.Pod{usergroup2Sandbox1Pod1})
        })
 
+       ginkgo.It("Verify_maxresources_with_a_valid_user_name_and_specific_user_limit", func() {
+               ginkgo.By("Update config")
+               validUser := "user_Test-a_b_#_c_#_d_/_e@dom:ain.com"
+               // The wait wrapper still can't fully guarantee that the config in AdmissionController has been updated.
+               yunikorn.WaitForAdmissionControllerRefreshConfAfterAction(func() {
+                       yunikorn.UpdateCustomConfigMapWrapperWithMap(oldConfigMap, "", admissionCustomConfig, func(sc *configs.SchedulerConfig) error {
+                               // remove placement rules so we can control queue
+                               sc.Partitions[0].PlacementRules = nil
+
+                               if err := common.AddQueue(sc, constants.DefaultPartition, constants.RootQueue, configs.QueueConfig{
+                                       Name: "sandbox1",
+                                       Limits: []configs.Limit{
+                                               {
+                                                       Limit:           "user entry",
+                                                       Users:           []string{validUser},
+                                                       MaxApplications: 2,
+                                                       MaxResources: map[string]string{
+                                                               siCommon.Memory: fmt.Sprintf("%dM", mediumMem),
+                                                       },
+                                               },
+                                       },
+                               }); err != nil {
+                                       return err
+                               }
+                               return common.AddQueue(sc, constants.DefaultPartition, constants.RootQueue, configs.QueueConfig{Name: "sandbox2"})
+                       })
+               })
+
+               // usergroup1 can deploy the first sleep pod to root.sandbox1
+               usergroup1 := &si.UserGroupInformation{User: validUser, Groups: []string{group1}}
+
+               // usergroup1 can't deploy the second sleep pod to root.sandbox1
+               usergroup1Sandbox1Pod1 := deploySleepPod(usergroup1, sandboxQueue1, true, "because memory usage is less than maxresources")
+               deploySleepPod(usergroup1, sandboxQueue1, false, "because final memory usage is more than maxresources")
+               checkUsage(userTestType, url.QueryEscape(validUser), sandboxQueue1, []*v1.Pod{usergroup1Sandbox1Pod1})
+       })
+
+       ginkgo.It("Verify_maxresources_with_a_valid_group_name_and_specific_group_limit", func() {
+               ginkgo.By("Update config")
+               validGroup := "group_Test-a_b_dom:ain.com"
+               // The wait wrapper still can't fully guarantee that the config in AdmissionController has been updated.
+               yunikorn.WaitForAdmissionControllerRefreshConfAfterAction(func() {
+                       yunikorn.UpdateCustomConfigMapWrapperWithMap(oldConfigMap, "", admissionCustomConfig, func(sc *configs.SchedulerConfig) error {
+                               // remove placement rules so we can control queue
+                               sc.Partitions[0].PlacementRules = nil
+
+                               if err := common.AddQueue(sc, constants.DefaultPartition, constants.RootQueue, configs.QueueConfig{
+                                       Name: "sandbox1",
+                                       Limits: []configs.Limit{
+                                               {
+                                                       Limit:           "group entry",
+                                                       Groups:          []string{validGroup},
+                                                       MaxApplications: 2,
+                                                       MaxResources: map[string]string{
+                                                               siCommon.Memory: fmt.Sprintf("%dM", mediumMem),
+                                                       },
+                                               },
+                                       },
+                               }); err != nil {
+                                       return err
+                               }
+                               return common.AddQueue(sc, constants.DefaultPartition, constants.RootQueue, configs.QueueConfig{Name: "sandbox2"})
+                       })
+               })
+
+               // usergroup1 can deploy the first sleep pod to root.sandbox1
+               usergroup1 := &si.UserGroupInformation{User: user1, Groups: []string{validGroup}}
+
+               // usergroup1 can't deploy the second sleep pod to root.sandbox1
+               usergroup1Sandbox1Pod1 := deploySleepPod(usergroup1, sandboxQueue1, true, "because memory usage is less than maxresources")
+               _ = deploySleepPod(usergroup1, sandboxQueue1, false, "because final memory usage is more than maxresources")
+               checkUsage(groupTestType, url.QueryEscape(validGroup), sandboxQueue1, []*v1.Pod{usergroup1Sandbox1Pod1})
+       })
+
        ginkgo.AfterEach(func() {
                tests.DumpClusterInfoIfSpecFailed(suiteName, []string{ns.Name})
 


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]
