This is an automated email from the ASF dual-hosted git repository.

mani pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/yunikorn-k8shim.git


The following commit(s) were added to refs/heads/master by this push:
     new dd02f2e9 [YUNIKORN-1956] Add wildcard user/group limit e2e tests
dd02f2e9 is described below

commit dd02f2e9055e1c262295a5aa87e9072a6f9d9c83
Author: PoAn Yang <[email protected]>
AuthorDate: Sun Dec 3 19:51:28 2023 +0530

    [YUNIKORN-1956] Add wildcard user/group limit e2e tests
    
    Closes: #738
    
    Signed-off-by: Manikandan R <[email protected]>
---
 go.mod                                             |   2 +-
 go.sum                                             |   4 +-
 test/e2e/user_group_limit/user_group_limit_test.go | 192 +++++++++++++++++++++
 3 files changed, 195 insertions(+), 3 deletions(-)

diff --git a/go.mod b/go.mod
index 3db611dd..c6226485 100644
--- a/go.mod
+++ b/go.mod
@@ -21,7 +21,7 @@ module github.com/apache/yunikorn-k8shim
 go 1.20
 
 require (
-       github.com/apache/yunikorn-core v0.0.0-20231025072825-bc7c00124c4c
+       github.com/apache/yunikorn-core v0.0.0-20231127054725-3b9c96615796
        github.com/apache/yunikorn-scheduler-interface v0.0.0-20231020041412-6f80d179257c
        github.com/google/go-cmp v0.6.0
        github.com/google/uuid v1.3.1
diff --git a/go.sum b/go.sum
index 7b918c09..819feafd 100644
--- a/go.sum
+++ b/go.sum
@@ -49,8 +49,8 @@ github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRF
 github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
 github.com/antlr/antlr4/runtime/Go/antlr v1.4.10 h1:yL7+Jz0jTC6yykIK/Wh74gnTJnrGr5AyrNMXuA0gves=
 github.com/antlr/antlr4/runtime/Go/antlr v1.4.10/go.mod h1:F7bn7fEU90QkQ3tnmaTx3LTKLEDqnwWODIYppRQ5hnY=
-github.com/apache/yunikorn-core v0.0.0-20231025072825-bc7c00124c4c h1:8LSd2HEo3u/6RjLoxkuqV8j6UKH9Qs109bd/qza6b30=
-github.com/apache/yunikorn-core v0.0.0-20231025072825-bc7c00124c4c/go.mod h1:rlS7KYZHyIoWFe06goLBPNkqAtDxk61G+KFpNYOBmvM=
+github.com/apache/yunikorn-core v0.0.0-20231127054725-3b9c96615796 h1:3OiAqfOtLrldegMNOq6kcq/vDMo4mqhqQkOS96uI0Ik=
+github.com/apache/yunikorn-core v0.0.0-20231127054725-3b9c96615796/go.mod h1:nZRI1fm9wa3bhdD4tpDtrEh7ll/Ft/z+NG/gi8l8M14=
 github.com/apache/yunikorn-scheduler-interface v0.0.0-20231020041412-6f80d179257c h1:KTIC3f+3aQdAo42YRxs27VpDWY6y73bxXpWcAii2IlQ=
 github.com/apache/yunikorn-scheduler-interface v0.0.0-20231020041412-6f80d179257c/go.mod h1:3NQfrhroMqU++kDTroBrTyCRKAczwwX//Fkj/ag/rsY=
 github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio=
diff --git a/test/e2e/user_group_limit/user_group_limit_test.go b/test/e2e/user_group_limit/user_group_limit_test.go
index 8475cf9a..9d429721 100644
--- a/test/e2e/user_group_limit/user_group_limit_test.go
+++ b/test/e2e/user_group_limit/user_group_limit_test.go
@@ -379,6 +379,198 @@ var _ = ginkgo.Describe("UserGroupLimit", func() {
                checkUsage(userTestType, user1, "root.sandbox1", []*v1.Pod{usergroup1Sandbox1Pod1})
        })
 
+       ginkgo.It("Verify_maxresources_with_a_wildcard_user_limit", func() {
+               ginkgo.By("Update config")
+               annotation = "ann-" + common.RandSeq(10)
+               // The wait wrapper still can't fully guarantee that the config in AdmissionController has been updated.
+               yunikorn.WaitForAdmissionControllerRefreshConfAfterAction(func() {
+                       yunikorn.UpdateCustomConfigMapWrapperWithMap(oldConfigMap, "", annotation, admissionCustomConfig, func(sc *configs.SchedulerConfig) error {
+                               // remove placement rules so we can control queue
+                               sc.Partitions[0].PlacementRules = nil
+
+                               return common.AddQueue(sc, "default", "root", configs.QueueConfig{
+                                       Name: "sandbox1",
+                                       Limits: []configs.Limit{
+                                               {
+                                                       Limit:           "user entry",
+                                                       Users:           []string{user1},
+                                                       MaxApplications: 2,
+                                                       MaxResources: map[string]string{
+                                                               siCommon.Memory: fmt.Sprintf("%dM", largeMem),
+                                                       },
+                                               },
+                                               {
+                                                       Limit:           "wildcard user entry",
+                                                       Users:           []string{"*"},
+                                                       MaxApplications: 2,
+                                                       MaxResources: map[string]string{
+                                                               siCommon.Memory: fmt.Sprintf("%dM", mediumMem),
+                                                       },
+                                               },
+                                       },
+                               })
+                       })
+               })
+
+               // usergroup1 can deploy 2 sleep pods to root.sandbox1
+               usergroup1 := &si.UserGroupInformation{User: user1, Groups: []string{group1}}
+               usergroup1Sandbox1Pod1 := deploySleepPod(usergroup1, "root.sandbox1", true, "because usage is less than user entry limit")
+               usergroup1Sandbox1Pod2 := deploySleepPod(usergroup1, "root.sandbox1", true, "because usage is equal to user entry limit")
+               checkUsage(userTestType, user1, "root.sandbox1", []*v1.Pod{usergroup1Sandbox1Pod1, usergroup1Sandbox1Pod2})
+
+               // usergroup2 can deploy 1 sleep pod to root.sandbox1
+               usergroup2 := &si.UserGroupInformation{User: user2, Groups: []string{group2}}
+               usergroup2Sandbox1Pod1 := deploySleepPod(usergroup2, "root.sandbox1", true, "because usage is less than wildcard user entry limit")
+
+               // usergroup2 can't deploy the second sleep pod to root.sandbox1
+               deploySleepPod(usergroup2, "root.sandbox1", false, "because final memory usage is more than wildcard maxresources")
+               checkUsage(userTestType, user2, "root.sandbox1", []*v1.Pod{usergroup2Sandbox1Pod1})
+       })
+
+       ginkgo.It("Verify_maxapplications_with_a_wildcard_user_limit", func() {
+               ginkgo.By("Update config")
+               annotation = "ann-" + common.RandSeq(10)
+               // The wait wrapper still can't fully guarantee that the config in AdmissionController has been updated.
+               yunikorn.WaitForAdmissionControllerRefreshConfAfterAction(func() {
+                       yunikorn.UpdateCustomConfigMapWrapperWithMap(oldConfigMap, "", annotation, admissionCustomConfig, func(sc *configs.SchedulerConfig) error {
+                               // remove placement rules so we can control queue
+                               sc.Partitions[0].PlacementRules = nil
+
+                               return common.AddQueue(sc, "default", "root", configs.QueueConfig{
+                                       Name: "sandbox1",
+                                       Limits: []configs.Limit{
+                                               {
+                                                       Limit:           "user entry",
+                                                       Users:           []string{user1},
+                                                       MaxApplications: 2,
+                                                       MaxResources: map[string]string{
+                                                               siCommon.Memory: fmt.Sprintf("%dM", largeMem),
+                                                       },
+                                               },
+                                               {
+                                                       Limit:           "wildcard user entry",
+                                                       Users:           []string{"*"},
+                                                       MaxApplications: 1,
+                                                       MaxResources: map[string]string{
+                                                               siCommon.Memory: fmt.Sprintf("%dM", largeMem),
+                                                       },
+                                               },
+                                       },
+                               })
+                       })
+               })
+
+               // usergroup1 can deploy 2 sleep pods to root.sandbox1
+               usergroup1 := &si.UserGroupInformation{User: user1, Groups: []string{group1}}
+               usergroup1Sandbox1Pod1 := deploySleepPod(usergroup1, "root.sandbox1", true, "because usage is less than user entry limit")
+               usergroup1Sandbox1Pod2 := deploySleepPod(usergroup1, "root.sandbox1", true, "because usage is equal to user entry limit")
+               checkUsage(userTestType, user1, "root.sandbox1", []*v1.Pod{usergroup1Sandbox1Pod1, usergroup1Sandbox1Pod2})
+
+               // usergroup2 can deploy 1 sleep pod to root.sandbox1
+               usergroup2 := &si.UserGroupInformation{User: user2, Groups: []string{group2}}
+               usergroup2Sandbox1Pod1 := deploySleepPod(usergroup2, "root.sandbox1", true, "because usage is less than wildcard user entry limit")
+
+               // usergroup2 can't deploy the second sleep pod to root.sandbox1
+               deploySleepPod(usergroup2, "root.sandbox1", false, "because final application count is more than wildcard maxapplications")
+               checkUsage(userTestType, user2, "root.sandbox1", []*v1.Pod{usergroup2Sandbox1Pod1})
+       })
+
+       ginkgo.It("Verify_maxresources_with_a_wildcard_group_limit", func() {
+               ginkgo.By("Update config")
+               annotation = "ann-" + common.RandSeq(10)
+               // The wait wrapper still can't fully guarantee that the config in AdmissionController has been updated.
+               yunikorn.WaitForAdmissionControllerRefreshConfAfterAction(func() {
+                       yunikorn.UpdateCustomConfigMapWrapperWithMap(oldConfigMap, "", annotation, admissionCustomConfig, func(sc *configs.SchedulerConfig) error {
+                               // remove placement rules so we can control queue
+                               sc.Partitions[0].PlacementRules = nil
+
+                               return common.AddQueue(sc, "default", "root", configs.QueueConfig{
+                                       Name: "sandbox1",
+                                       Limits: []configs.Limit{
+                                               {
+                                                       Limit:           "group entry",
+                                                       Groups:          []string{group1},
+                                                       MaxApplications: 2,
+                                                       MaxResources: map[string]string{
+                                                               siCommon.Memory: fmt.Sprintf("%dM", largeMem),
+                                                       },
+                                               },
+                                               {
+                                                       Limit:           "wildcard group entry",
+                                                       Groups:          []string{"*"},
+                                                       MaxApplications: 2,
+                                                       MaxResources: map[string]string{
+                                                               siCommon.Memory: fmt.Sprintf("%dM", mediumMem),
+                                                       },
+                                               },
+                                       },
+                               })
+                       })
+               })
+
+               // usergroup1 can deploy 2 sleep pods to root.sandbox1
+               usergroup1 := &si.UserGroupInformation{User: user1, Groups: []string{group1}}
+               usergroup1Sandbox1Pod1 := deploySleepPod(usergroup1, "root.sandbox1", true, "because usage is less than user entry limit")
+               usergroup1Sandbox1Pod2 := deploySleepPod(usergroup1, "root.sandbox1", true, "because usage is equal to user entry limit")
+               checkUsage(userTestType, user1, "root.sandbox1", []*v1.Pod{usergroup1Sandbox1Pod1, usergroup1Sandbox1Pod2})
+
+               // usergroup2 can deploy 1 sleep pod to root.sandbox1
+               usergroup2 := &si.UserGroupInformation{User: user2, Groups: []string{group2}}
+               usergroup2Sandbox1Pod1 := deploySleepPod(usergroup2, "root.sandbox1", true, "because usage is less than wildcard user entry limit")
+
+               // usergroup2 can't deploy the second sleep pod to root.sandbox1
+               deploySleepPod(usergroup2, "root.sandbox1", false, "because final memory usage is more than wildcard maxresources")
+               checkUsage(userTestType, user2, "root.sandbox1", []*v1.Pod{usergroup2Sandbox1Pod1})
+       })
+
+       ginkgo.It("Verify_maxapplications_with_a_wildcard_group_limit", func() {
+               ginkgo.By("Update config")
+               annotation = "ann-" + common.RandSeq(10)
+               // The wait wrapper still can't fully guarantee that the config in AdmissionController has been updated.
+               yunikorn.WaitForAdmissionControllerRefreshConfAfterAction(func() {
+                       yunikorn.UpdateCustomConfigMapWrapperWithMap(oldConfigMap, "", annotation, admissionCustomConfig, func(sc *configs.SchedulerConfig) error {
+                               // remove placement rules so we can control queue
+                               sc.Partitions[0].PlacementRules = nil
+
+                               return common.AddQueue(sc, "default", "root", configs.QueueConfig{
+                                       Name: "sandbox1",
+                                       Limits: []configs.Limit{
+                                               {
+                                                       Limit:           "group entry",
+                                                       Groups:          []string{group1},
+                                                       MaxApplications: 2,
+                                                       MaxResources: map[string]string{
+                                                               siCommon.Memory: fmt.Sprintf("%dM", largeMem),
+                                                       },
+                                               },
+                                               {
+                                                       Limit:           "wildcard group entry",
+                                                       Groups:          []string{"*"},
+                                                       MaxApplications: 1,
+                                                       MaxResources: map[string]string{
+                                                               siCommon.Memory: fmt.Sprintf("%dM", largeMem),
+                                                       },
+                                               },
+                                       },
+                               })
+                       })
+               })
+
+               // usergroup1 can deploy 2 sleep pods to root.sandbox1
+               usergroup1 := &si.UserGroupInformation{User: user1, Groups: []string{group1}}
+               usergroup1Sandbox1Pod1 := deploySleepPod(usergroup1, "root.sandbox1", true, "because usage is less than group entry limit")
+               usergroup1Sandbox1Pod2 := deploySleepPod(usergroup1, "root.sandbox1", true, "because usage is equal to group entry limit")
+               checkUsage(userTestType, user1, "root.sandbox1", []*v1.Pod{usergroup1Sandbox1Pod1, usergroup1Sandbox1Pod2})
+
+               // usergroup2 can deploy 1 sleep pod to root.sandbox1
+               usergroup2 := &si.UserGroupInformation{User: user2, Groups: []string{group2}}
+               usergroup2Sandbox1Pod1 := deploySleepPod(usergroup2, "root.sandbox1", true, "because usage is less than wildcard group entry limit")
+
+               // usergroup2 can't deploy the second sleep pod to root.sandbox1
+               deploySleepPod(usergroup2, "root.sandbox1", false, "because final application count is more than wildcard maxapplications")
+               checkUsage(userTestType, user2, "root.sandbox1", []*v1.Pod{usergroup2Sandbox1Pod1})
+       })
+
        ginkgo.AfterEach(func() {
                testDescription := ginkgo.CurrentSpecReport()
                if testDescription.Failed() {


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to