This is an automated email from the ASF dual-hosted git repository.
pbacsko pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/yunikorn-k8shim.git
The following commit(s) were added to refs/heads/master by this push:
     new 98d71eaa [YUNIKORN-1998] Stale AdmissionControllerConf was used in e2e test (#683)
98d71eaa is described below
commit 98d71eaa6c1f945b402ef7895419cbacf727100d
Author: Yu-Lin Chen <[email protected]>
AuthorDate: Thu Nov 2 15:29:28 2023 +0100
[YUNIKORN-1998] Stale AdmissionControllerConf was used in e2e test (#683)
Closes: #683
Signed-off-by: Peter Bacsko <[email protected]>
---
.../admission_controller_test.go | 101 +++-----
test/e2e/framework/helpers/k8s/events.go | 46 ++++
test/e2e/framework/helpers/yunikorn/wrappers.go | 8 +
test/e2e/user_group_limit/user_group_limit_test.go | 262 +++++++++++----------
4 files changed, 225 insertions(+), 192 deletions(-)
diff --git a/test/e2e/admission_controller/admission_controller_test.go b/test/e2e/admission_controller/admission_controller_test.go
index ee265729..27b3bb56 100644
--- a/test/e2e/admission_controller/admission_controller_test.go
+++ b/test/e2e/admission_controller/admission_controller_test.go
@@ -41,31 +41,6 @@ const nonExistentNode = "non-existent-node"
const defaultPodTimeout = 10 * time.Second
const cronJobPodTimeout = 65 * time.Second
-type EventHandler struct {
- updateCh chan struct{}
-}
-
-func (e *EventHandler) OnAdd(_ interface{}, _ bool) {}
-
-func (e *EventHandler) OnUpdate(_, _ interface{}) {
- e.updateCh <- struct{}{}
-}
-
-func (e *EventHandler) OnDelete(_ interface{}) {}
-
-func (e *EventHandler) WaitForUpdate(timeout time.Duration) bool {
- t := time.After(timeout)
-
- for {
- select {
- case <-t:
- return false
- case <-e.updateCh:
- return true
- }
- }
-}
-
var _ = ginkgo.Describe("AdmissionController", func() {
ginkgo.BeforeEach(func() {
kubeClient = k8s.KubeCtl{}
@@ -317,16 +292,11 @@ var _ = ginkgo.Describe("AdmissionController", func() {
}
configMap.Data[amConf.AMAccessControlTrustControllers] = "false"
ginkgo.By("Update configmap")
- stopChan := make(chan struct{})
- eventHandler := &EventHandler{updateCh: make(chan struct{})}
- err = kubeClient.StartConfigMapInformer(configmanager.YuniKornTestConfig.YkNamespace, stopChan, eventHandler)
- defer close(stopChan)
- gomega.Ω(err).ShouldNot(gomega.HaveOccurred())
- _, err = kubeClient.UpdateConfigMap(configMap, configmanager.YuniKornTestConfig.YkNamespace)
- gomega.Ω(err).ShouldNot(gomega.HaveOccurred())
- updateOk := eventHandler.WaitForUpdate(30 * time.Second)
- gomega.Ω(updateOk).To(gomega.Equal(true))
- time.Sleep(time.Second)
+ // The wait wrapper still can't fully guarantee that the config in AdmissionController has been updated.
+ yunikorn.WaitForAdmissionControllerRefreshConfAfterAction(func() {
+ _, err = kubeClient.UpdateConfigMap(configMap, configmanager.YuniKornTestConfig.YkNamespace)
+ gomega.Ω(err).ShouldNot(gomega.HaveOccurred())
+ })
ginkgo.By("Create a deployment")
deployment, err2 := kubeClient.CreateDeployment(&testDeployment, ns)
@@ -353,11 +323,11 @@ var _ = ginkgo.Describe("AdmissionController", func() {
configMap, err = kubeClient.GetConfigMap(constants.ConfigMapName, configmanager.YuniKornTestConfig.YkNamespace)
gomega.Ω(err).ShouldNot(gomega.HaveOccurred())
configMap.Data[amConf.AMAccessControlTrustControllers] = "true"
- _, err = kubeClient.UpdateConfigMap(configMap, configmanager.YuniKornTestConfig.YkNamespace)
- gomega.Ω(err).ShouldNot(gomega.HaveOccurred())
- updateOk = eventHandler.WaitForUpdate(30 * time.Second)
- gomega.Ω(updateOk).To(gomega.Equal(true))
- time.Sleep(time.Second)
+ // The wait wrapper still can't fully guarantee that the config in AdmissionController has been updated.
+ yunikorn.WaitForAdmissionControllerRefreshConfAfterAction(func() {
+ _, err = kubeClient.UpdateConfigMap(configMap, configmanager.YuniKornTestConfig.YkNamespace)
+ gomega.Ω(err).ShouldNot(gomega.HaveOccurred())
+ })
// pod is expected to appear
ginkgo.By("Check for sleep pod")
@@ -375,16 +345,11 @@ var _ = ginkgo.Describe("AdmissionController", func() {
}
configMap.Data[amConf.AMAccessControlExternalUsers] = ""
ginkgo.By("Update configmap")
- stopChan := make(chan struct{})
- eventHandler := &EventHandler{updateCh: make(chan struct{})}
- err = kubeClient.StartConfigMapInformer(configmanager.YuniKornTestConfig.YkNamespace, stopChan, eventHandler)
- defer close(stopChan)
- gomega.Ω(err).ShouldNot(gomega.HaveOccurred())
- _, err = kubeClient.UpdateConfigMap(configMap, configmanager.YuniKornTestConfig.YkNamespace)
- gomega.Ω(err).ShouldNot(gomega.HaveOccurred())
- updateOk := eventHandler.WaitForUpdate(30 * time.Second)
- gomega.Ω(updateOk).To(gomega.Equal(true))
- time.Sleep(time.Second)
+ // The wait wrapper still can't fully guarantee that the config in AdmissionController has been updated.
+ yunikorn.WaitForAdmissionControllerRefreshConfAfterAction(func() {
+ _, err = kubeClient.UpdateConfigMap(configMap, configmanager.YuniKornTestConfig.YkNamespace)
+ gomega.Ω(err).ShouldNot(gomega.HaveOccurred())
+ })
ginkgo.By("Create a deployment")
deployment := testDeployment.DeepCopy()
@@ -400,11 +365,12 @@ var _ = ginkgo.Describe("AdmissionController", func() {
configMap, err = kubeClient.GetConfigMap(constants.ConfigMapName, configmanager.YuniKornTestConfig.YkNamespace)
gomega.Ω(err).ShouldNot(gomega.HaveOccurred())
configMap.Data[amConf.AMAccessControlExternalUsers] = "(^minikube-user$|^kubernetes-admin$)" // works with Minikube & KIND
- _, err = kubeClient.UpdateConfigMap(configMap, configmanager.YuniKornTestConfig.YkNamespace)
- gomega.Ω(err).ShouldNot(gomega.HaveOccurred())
- updateOk = eventHandler.WaitForUpdate(30 * time.Second)
- gomega.Ω(updateOk).To(gomega.Equal(true))
- time.Sleep(time.Second)
+ // The wait wrapper still can't fully guarantee that the config in AdmissionController has been updated.
+ yunikorn.WaitForAdmissionControllerRefreshConfAfterAction(func() {
+ _, err = kubeClient.UpdateConfigMap(configMap, configmanager.YuniKornTestConfig.YkNamespace)
+ gomega.Ω(err).ShouldNot(gomega.HaveOccurred())
+
+ })
// submit deployment again
ginkgo.By("Submit deployment again")
@@ -428,16 +394,11 @@ var _ = ginkgo.Describe("AdmissionController", func() {
}
configMap.Data[amConf.AMAccessControlBypassAuth] = "true"
ginkgo.By("Update configmap (bypassAuth -> true)")
- stopChan := make(chan struct{})
- eventHandler := &EventHandler{updateCh: make(chan struct{})}
- err = kubeClient.StartConfigMapInformer(configmanager.YuniKornTestConfig.YkNamespace, stopChan, eventHandler)
- defer close(stopChan)
- gomega.Ω(err).ShouldNot(gomega.HaveOccurred())
- _, err = kubeClient.UpdateConfigMap(configMap, configmanager.YuniKornTestConfig.YkNamespace)
- gomega.Ω(err).ShouldNot(gomega.HaveOccurred())
- updateOk := eventHandler.WaitForUpdate(30 * time.Second)
- gomega.Ω(updateOk).To(gomega.Equal(true))
- time.Sleep(time.Second)
+ // The wait wrapper still can't fully guarantee that the config in AdmissionController has been updated.
+ yunikorn.WaitForAdmissionControllerRefreshConfAfterAction(func() {
+ _, err = kubeClient.UpdateConfigMap(configMap, configmanager.YuniKornTestConfig.YkNamespace)
+ gomega.Ω(err).ShouldNot(gomega.HaveOccurred())
+ })
ginkgo.By("Submit a deployment")
deployment := testDeployment.DeepCopy()
@@ -453,11 +414,11 @@ var _ = ginkgo.Describe("AdmissionController", func() {
configMap, err = kubeClient.GetConfigMap(constants.ConfigMapName, configmanager.YuniKornTestConfig.YkNamespace)
gomega.Ω(err).ShouldNot(gomega.HaveOccurred())
configMap.Data[amConf.AMAccessControlBypassAuth] = "false"
- _, err = kubeClient.UpdateConfigMap(configMap, configmanager.YuniKornTestConfig.YkNamespace)
- gomega.Ω(err).ShouldNot(gomega.HaveOccurred())
- updateOk = eventHandler.WaitForUpdate(30 * time.Second)
- gomega.Ω(updateOk).To(gomega.Equal(true))
- time.Sleep(time.Second)
+ // The wait wrapper still can't fully guarantee that the config in AdmissionController has been updated.
+ yunikorn.WaitForAdmissionControllerRefreshConfAfterAction(func() {
+ _, err = kubeClient.UpdateConfigMap(configMap, configmanager.YuniKornTestConfig.YkNamespace)
+ gomega.Ω(err).ShouldNot(gomega.HaveOccurred())
+ })
ginkgo.By("Update container image in deployment")
deployment, err = kubeClient.GetDeployment(deployment.Name, ns)
diff --git a/test/e2e/framework/helpers/k8s/events.go b/test/e2e/framework/helpers/k8s/events.go
index 06940f53..3edcaaa6 100644
--- a/test/e2e/framework/helpers/k8s/events.go
+++ b/test/e2e/framework/helpers/k8s/events.go
@@ -34,6 +34,8 @@ import (
"k8s.io/apimachinery/pkg/watch"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/cache"
+
+ "github.com/apache/yunikorn-k8shim/test/e2e/framework/configmanager"
)
func ScheduleSuccessEvent(ns, podName, nodeName string) func(*v1.Event) bool {
@@ -112,3 +114,47 @@ func ObserveEventAfterAction(c clientset.Interface, ns string, eventPredicate fu
})
return err == nil, err
}
+
+type EventHandler struct {
+ updateCh chan struct{}
+}
+
+func (e *EventHandler) OnAdd(_ interface{}, _ bool) {}
+
+func (e *EventHandler) OnUpdate(_, _ interface{}) {
+ e.updateCh <- struct{}{}
+}
+
+func (e *EventHandler) OnDelete(_ interface{}) {}
+
+func (e *EventHandler) WaitForUpdate(timeout time.Duration) bool {
+ t := time.After(timeout)
+
+ for {
+ select {
+ case <-t:
+ return false
+ case <-e.updateCh:
+ return true
+ }
+ }
+}
+
+func ObserveConfigMapInformerUpdateAfterAction(action func()) {
+ kubeClient := KubeCtl{}
+ gomega.Expect(kubeClient.SetClient()).To(gomega.BeNil())
+
+ // Setup ConfigMap informer
+ stopChan := make(chan struct{})
+ eventHandler := &EventHandler{updateCh: make(chan struct{})}
+ err := kubeClient.StartConfigMapInformer(configmanager.YuniKornTestConfig.YkNamespace, stopChan, eventHandler)
+ defer close(stopChan)
+ gomega.Ω(err).ShouldNot(gomega.HaveOccurred())
+
+ // Trigger action
+ action()
+
+ // Wait for the ConfigMap informer to receive the update event.
+ updateOk := eventHandler.WaitForUpdate(30 * time.Second)
+ gomega.Ω(updateOk).To(gomega.Equal(true))
+}
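For reference, a minimal usage sketch of the new helper above (illustrative only, not part of the commit; the KubeCtl value and *v1.ConfigMap are assumed to come from the surrounding e2e test, and the helper import paths are assumed to follow the repository layout shown in this diff):

    package example

    import (
        "github.com/onsi/gomega"
        v1 "k8s.io/api/core/v1"

        "github.com/apache/yunikorn-k8shim/test/e2e/framework/configmanager"
        "github.com/apache/yunikorn-k8shim/test/e2e/framework/helpers/k8s"
    )

    // updateAndWait mutates the YuniKorn ConfigMap and blocks until the informer
    // started by ObserveConfigMapInformerUpdateAfterAction observes the update
    // event (up to the 30-second timeout built into EventHandler.WaitForUpdate).
    func updateAndWait(kubeClient k8s.KubeCtl, configMap *v1.ConfigMap) {
        k8s.ObserveConfigMapInformerUpdateAfterAction(func() {
            _, err := kubeClient.UpdateConfigMap(configMap, configmanager.YuniKornTestConfig.YkNamespace)
            gomega.Ω(err).ShouldNot(gomega.HaveOccurred())
        })
    }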
diff --git a/test/e2e/framework/helpers/yunikorn/wrappers.go b/test/e2e/framework/helpers/yunikorn/wrappers.go
index d7296f54..d11877c3 100644
--- a/test/e2e/framework/helpers/yunikorn/wrappers.go
+++ b/test/e2e/framework/helpers/yunikorn/wrappers.go
@@ -145,6 +145,14 @@ func RestoreConfigMapWrapper(oldConfigMap *v1.ConfigMap, annotation string) {
Ω(err).NotTo(HaveOccurred())
}
+// There is no available method to check whether the config in the admission controller has been updated.
+// As a temporary solution, we are checking the update event using the informer, followed by a 1-second sleep.
+// Please refer to YUNIKORN-1998 for more details.
+func WaitForAdmissionControllerRefreshConfAfterAction(action func()) {
+ k8s.ObserveConfigMapInformerUpdateAfterAction(action)
+ time.Sleep(1 * time.Second)
+}
+
var Describe = ginkgo.Describe
var It = ginkgo.It
var By = ginkgo.By
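The call pattern the updated tests use with this wrapper looks like the sketch below (illustrative only, mirroring the admission_controller_test.go changes above; identifiers such as kubeClient and configMap come from the test context):

    // Wrap any ConfigMap mutation whose effect must be visible to the admission
    // controller before the test proceeds; the wrapper waits for the informer
    // update event and then sleeps one extra second (see YUNIKORN-1998).
    yunikorn.WaitForAdmissionControllerRefreshConfAfterAction(func() {
        _, err = kubeClient.UpdateConfigMap(configMap, configmanager.YuniKornTestConfig.YkNamespace)
        gomega.Ω(err).ShouldNot(gomega.HaveOccurred())
    })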
diff --git a/test/e2e/user_group_limit/user_group_limit_test.go b/test/e2e/user_group_limit/user_group_limit_test.go
index 7803c631..98bd4442 100644
--- a/test/e2e/user_group_limit/user_group_limit_test.go
+++ b/test/e2e/user_group_limit/user_group_limit_test.go
@@ -104,26 +104,29 @@ var _ = ginkgo.Describe("UserGroupLimit", func() {
ginkgo.It("Verify_maxresources_with_a_specific_user_limit", func() {
ginkgo.By("Update config")
annotation = "ann-" + common.RandSeq(10)
- yunikorn.UpdateCustomConfigMapWrapperWithMap(oldConfigMap, "", annotation, admissionCustomConfig, func(sc *configs.SchedulerConfig) error {
- // remove placement rules so we can control queue
- sc.Partitions[0].PlacementRules = nil
-
- if err := common.AddQueue(sc, "default", "root", configs.QueueConfig{
- Name: "sandbox1",
- Limits: []configs.Limit{
- {
- Limit: "user entry",
- Users: []string{user1},
- MaxApplications: 2,
- MaxResources: map[string]string{
- siCommon.Memory: fmt.Sprintf("%dM", mediumMem),
+ // The wait wrapper still can't fully guarantee that the config in AdmissionController has been updated.
+ yunikorn.WaitForAdmissionControllerRefreshConfAfterAction(func() {
+ yunikorn.UpdateCustomConfigMapWrapperWithMap(oldConfigMap, "", annotation, admissionCustomConfig, func(sc *configs.SchedulerConfig) error {
+ // remove placement rules so we can control queue
+ sc.Partitions[0].PlacementRules = nil
+
+ if err := common.AddQueue(sc, "default", "root", configs.QueueConfig{
+ Name: "sandbox1",
+ Limits: []configs.Limit{
+ {
+ Limit: "user entry",
+ Users: []string{user1},
+ MaxApplications: 2,
+ MaxResources: map[string]string{
+ siCommon.Memory: fmt.Sprintf("%dM", mediumMem),
+ },
},
},
- },
- }); err != nil {
- return err
- }
- return common.AddQueue(sc, "default", "root", configs.QueueConfig{Name: "sandbox2"})
+ }); err != nil {
+ return err
+ }
+ return common.AddQueue(sc, "default", "root", configs.QueueConfig{Name: "sandbox2"})
+ })
})
// usergroup1 can deploy the first sleep pod to root.sandbox1
@@ -149,26 +152,29 @@ var _ = ginkgo.Describe("UserGroupLimit", func() {
ginkgo.It("Verify_maxapplications_with_a_specific_user_limit", func() {
ginkgo.By("Update config")
annotation = "ann-" + common.RandSeq(10)
- yunikorn.UpdateCustomConfigMapWrapperWithMap(oldConfigMap, "", annotation, admissionCustomConfig, func(sc *configs.SchedulerConfig) error {
- // remove placement rules so we can control queue
- sc.Partitions[0].PlacementRules = nil
-
- if err := common.AddQueue(sc, "default", "root", configs.QueueConfig{
- Name: "sandbox1",
- Limits: []configs.Limit{
- {
- Limit: "user entry",
- Users: []string{user1},
- MaxApplications: 1,
- MaxResources: map[string]string{
- siCommon.Memory: fmt.Sprintf("%dM", largeMem),
+ // The wait wrapper still can't fully guarantee that the config in AdmissionController has been updated.
+ yunikorn.WaitForAdmissionControllerRefreshConfAfterAction(func() {
+ yunikorn.UpdateCustomConfigMapWrapperWithMap(oldConfigMap, "", annotation, admissionCustomConfig, func(sc *configs.SchedulerConfig) error {
+ // remove placement rules so we can control queue
+ sc.Partitions[0].PlacementRules = nil
+
+ if err := common.AddQueue(sc, "default", "root", configs.QueueConfig{
+ Name: "sandbox1",
+ Limits: []configs.Limit{
+ {
+ Limit: "user entry",
+ Users: []string{user1},
+ MaxApplications: 1,
+ MaxResources: map[string]string{
+ siCommon.Memory: fmt.Sprintf("%dM", largeMem),
+ },
},
},
- },
- }); err != nil {
- return err
- }
- return common.AddQueue(sc, "default", "root", configs.QueueConfig{Name: "sandbox2"})
+ }); err != nil {
+ return err
+ }
+ return common.AddQueue(sc, "default", "root", configs.QueueConfig{Name: "sandbox2"})
+ })
})
// usergroup1 can deploy the first sleep pod to root.sandbox1
@@ -194,26 +200,29 @@ var _ = ginkgo.Describe("UserGroupLimit", func() {
ginkgo.It("Verify_maxresources_with_a_specific_group_limit", func() {
ginkgo.By("Update config")
annotation = "ann-" + common.RandSeq(10)
- yunikorn.UpdateCustomConfigMapWrapperWithMap(oldConfigMap, "", annotation, admissionCustomConfig, func(sc *configs.SchedulerConfig) error {
- // remove placement rules so we can control queue
- sc.Partitions[0].PlacementRules = nil
-
- if err := common.AddQueue(sc, "default", "root", configs.QueueConfig{
- Name: "sandbox1",
- Limits: []configs.Limit{
- {
- Limit: "group entry",
- Groups: []string{group1},
- MaxApplications: 2,
- MaxResources: map[string]string{
- siCommon.Memory: fmt.Sprintf("%dM", mediumMem),
+ // The wait wrapper still can't fully guarantee that the config in AdmissionController has been updated.
+ yunikorn.WaitForAdmissionControllerRefreshConfAfterAction(func() {
+ yunikorn.UpdateCustomConfigMapWrapperWithMap(oldConfigMap, "", annotation, admissionCustomConfig, func(sc *configs.SchedulerConfig) error {
+ // remove placement rules so we can control queue
+ sc.Partitions[0].PlacementRules = nil
+
+ if err := common.AddQueue(sc, "default", "root", configs.QueueConfig{
+ Name: "sandbox1",
+ Limits: []configs.Limit{
+ {
+ Limit: "group entry",
+ Groups: []string{group1},
+ MaxApplications: 2,
+ MaxResources: map[string]string{
+ siCommon.Memory: fmt.Sprintf("%dM", mediumMem),
+ },
},
},
- },
- }); err != nil {
- return err
- }
- return common.AddQueue(sc, "default", "root", configs.QueueConfig{Name: "sandbox2"})
+ }); err != nil {
+ return err
+ }
+ return common.AddQueue(sc, "default", "root", configs.QueueConfig{Name: "sandbox2"})
+ })
})
// usergroup1 can deploy the first sleep pod to root.sandbox1
@@ -237,26 +246,29 @@ var _ = ginkgo.Describe("UserGroupLimit", func() {
ginkgo.It("Verify_maxapplications_with_a_specific_group_limit", func() {
ginkgo.By("Update config")
annotation = "ann-" + common.RandSeq(10)
- yunikorn.UpdateCustomConfigMapWrapperWithMap(oldConfigMap, "", annotation, admissionCustomConfig, func(sc *configs.SchedulerConfig) error {
- // remove placement rules so we can control queue
- sc.Partitions[0].PlacementRules = nil
-
- if err := common.AddQueue(sc, "default", "root", configs.QueueConfig{
- Name: "sandbox1",
- Limits: []configs.Limit{
- {
- Limit: "group entry",
- Groups: []string{group1},
- MaxApplications: 1,
- MaxResources: map[string]string{
- siCommon.Memory: fmt.Sprintf("%dM", largeMem),
+ // The wait wrapper still can't fully guarantee that the config in AdmissionController has been updated.
+ yunikorn.WaitForAdmissionControllerRefreshConfAfterAction(func() {
+ yunikorn.UpdateCustomConfigMapWrapperWithMap(oldConfigMap, "", annotation, admissionCustomConfig, func(sc *configs.SchedulerConfig) error {
+ // remove placement rules so we can control queue
+ sc.Partitions[0].PlacementRules = nil
+
+ if err := common.AddQueue(sc, "default", "root", configs.QueueConfig{
+ Name: "sandbox1",
+ Limits: []configs.Limit{
+ {
+ Limit: "group entry",
+ Groups: []string{group1},
+ MaxApplications: 1,
+ MaxResources: map[string]string{
+ siCommon.Memory: fmt.Sprintf("%dM", largeMem),
+ },
},
},
- },
- }); err != nil {
- return err
- }
- return common.AddQueue(sc, "default", "root", configs.QueueConfig{Name: "sandbox2"})
+ }); err != nil {
+ return err
+ }
+ return common.AddQueue(sc, "default", "root", configs.QueueConfig{Name: "sandbox2"})
+ })
})
// usergroup1 can deploy the first sleep pod to root.sandbox1
@@ -280,34 +292,37 @@ var _ = ginkgo.Describe("UserGroupLimit", func() {
ginkgo.It("Verify_maxresources_with_user_limit_lower_than_group_limit",
func() {
ginkgo.By("Update config")
annotation = "ann-" + common.RandSeq(10)
- yunikorn.UpdateCustomConfigMapWrapperWithMap(oldConfigMap, "",
annotation, admissionCustomConfig, func(sc *configs.SchedulerConfig) error {
- // remove placement rules so we can control queue
- sc.Partitions[0].PlacementRules = nil
-
- if err := common.AddQueue(sc, "default", "root",
configs.QueueConfig{
- Name: "sandbox1",
- Limits: []configs.Limit{
- {
- Limit: "user entry",
- Users:
[]string{user1},
- MaxApplications: 2,
- MaxResources: map[string]string{
- siCommon.Memory:
fmt.Sprintf("%dM", mediumMem),
+ // The wait wrapper still can't fully guarantee that the config
in AdmissionController has been updated.
+
yunikorn.WaitForAdmissionControllerRefreshConfAfterAction(func() {
+
yunikorn.UpdateCustomConfigMapWrapperWithMap(oldConfigMap, "", annotation,
admissionCustomConfig, func(sc *configs.SchedulerConfig) error {
+ // remove placement rules so we can control
queue
+ sc.Partitions[0].PlacementRules = nil
+
+ if err := common.AddQueue(sc, "default",
"root", configs.QueueConfig{
+ Name: "sandbox1",
+ Limits: []configs.Limit{
+ {
+ Limit: "user
entry",
+ Users:
[]string{user1},
+ MaxApplications: 2,
+ MaxResources:
map[string]string{
+
siCommon.Memory: fmt.Sprintf("%dM", mediumMem),
+ },
},
- },
- {
- Limit: "group entry",
- Groups:
[]string{group1},
- MaxApplications: 2,
- MaxResources: map[string]string{
- siCommon.Memory:
fmt.Sprintf("%dM", largeMem),
+ {
+ Limit: "group
entry",
+ Groups:
[]string{group1},
+ MaxApplications: 2,
+ MaxResources:
map[string]string{
+
siCommon.Memory: fmt.Sprintf("%dM", largeMem),
+ },
},
},
- },
- }); err != nil {
- return err
- }
- return common.AddQueue(sc, "default", "root",
configs.QueueConfig{Name: "sandbox2"})
+ }); err != nil {
+ return err
+ }
+ return common.AddQueue(sc, "default", "root",
configs.QueueConfig{Name: "sandbox2"})
+ })
})
// usergroup1 can deploy the first sleep pod to root.sandbox1
@@ -322,34 +337,37 @@ var _ = ginkgo.Describe("UserGroupLimit", func() {
ginkgo.It("Verify_maxresources_with_group_limit_lower_than_user_limit",
func() {
ginkgo.By("Update config")
annotation = "ann-" + common.RandSeq(10)
- yunikorn.UpdateCustomConfigMapWrapperWithMap(oldConfigMap, "",
annotation, admissionCustomConfig, func(sc *configs.SchedulerConfig) error {
- // remove placement rules so we can control queue
- sc.Partitions[0].PlacementRules = nil
-
- if err := common.AddQueue(sc, "default", "root",
configs.QueueConfig{
- Name: "sandbox1",
- Limits: []configs.Limit{
- {
- Limit: "user entry",
- Users:
[]string{user1},
- MaxApplications: 2,
- MaxResources: map[string]string{
- siCommon.Memory:
fmt.Sprintf("%dM", largeMem),
+ // The wait wrapper still can't fully guarantee that the config
in AdmissionController has been updated.
+
yunikorn.WaitForAdmissionControllerRefreshConfAfterAction(func() {
+
yunikorn.UpdateCustomConfigMapWrapperWithMap(oldConfigMap, "", annotation,
admissionCustomConfig, func(sc *configs.SchedulerConfig) error {
+ // remove placement rules so we can control
queue
+ sc.Partitions[0].PlacementRules = nil
+
+ if err := common.AddQueue(sc, "default",
"root", configs.QueueConfig{
+ Name: "sandbox1",
+ Limits: []configs.Limit{
+ {
+ Limit: "user
entry",
+ Users:
[]string{user1},
+ MaxApplications: 2,
+ MaxResources:
map[string]string{
+
siCommon.Memory: fmt.Sprintf("%dM", largeMem),
+ },
},
- },
- {
- Limit: "group entry",
- Groups:
[]string{group1},
- MaxApplications: 2,
- MaxResources: map[string]string{
- siCommon.Memory:
fmt.Sprintf("%dM", mediumMem),
+ {
+ Limit: "group
entry",
+ Groups:
[]string{group1},
+ MaxApplications: 2,
+ MaxResources:
map[string]string{
+
siCommon.Memory: fmt.Sprintf("%dM", mediumMem),
+ },
},
},
- },
- }); err != nil {
- return err
- }
- return common.AddQueue(sc, "default", "root",
configs.QueueConfig{Name: "sandbox2"})
+ }); err != nil {
+ return err
+ }
+ return common.AddQueue(sc, "default", "root",
configs.QueueConfig{Name: "sandbox2"})
+ })
})
// usergroup1 can deploy the first sleep pod to root.sandbox1
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]