This is an automated email from the ASF dual-hosted git repository.
ccondit pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/yunikorn-k8shim.git
The following commit(s) were added to refs/heads/master by this push:
new cc915524 [YUNIKORN-1126] Add e2e test for best effort pod (#904)
cc915524 is described below
commit cc915524e21a58c2f94eae85202c3a8640599dcd
Author: rrajesh <[email protected]>
AuthorDate: Thu Sep 5 15:20:07 2024 -0500
[YUNIKORN-1126] Add e2e test for best effort pod (#904)
Closes: #904
Signed-off-by: Craig Condit <[email protected]>
---
test/e2e/basic_scheduling/basic_scheduling_test.go | 72 ++++++++++++++++++++++
test/e2e/framework/helpers/k8s/pod_conf.go | 18 ++++--
2 files changed, 84 insertions(+), 6 deletions(-)
diff --git a/test/e2e/basic_scheduling/basic_scheduling_test.go b/test/e2e/basic_scheduling/basic_scheduling_test.go
index a95e091a..9f3dca89 100644
--- a/test/e2e/basic_scheduling/basic_scheduling_test.go
+++ b/test/e2e/basic_scheduling/basic_scheduling_test.go
@@ -119,6 +119,78 @@ var _ = ginkgo.Describe("", func() {
Ω(resMap["vcore"]).To(gomega.Equal(core))
})
+ ginkgo.It("Verify_BestEffort_QOS_Pod_Scheduling", func() {
+ ginkgo.By("Create a pod with QOS class set to BestEffort")
+ bestEffortPodConfig := k8s.SleepPodConfig{Name: "besteffortpod", NS: dev, QOSClass: v1.PodQOSBestEffort}
+ initPod, podErr := k8s.InitSleepPod(bestEffortPodConfig)
+ gomega.Ω(podErr).NotTo(gomega.HaveOccurred())
+ bestEffortPod, err := kClient.CreatePod(initPod, dev)
+ gomega.Ω(err).NotTo(gomega.HaveOccurred())
+
+ ginkgo.By("Wait for the pod to move to running state")
+ err = kClient.WaitForPodRunning(dev, bestEffortPodConfig.Name, 30*time.Second)
+ gomega.Ω(err).NotTo(gomega.HaveOccurred())
+
+ ginkgo.By("Verify that the pod is scheduled and running")
+ appsInfo, err = restClient.GetAppInfo("default", "root."+dev, bestEffortPod.ObjectMeta.Labels["applicationId"])
+ gomega.Ω(err).NotTo(gomega.HaveOccurred())
+ gomega.Ω(appsInfo).NotTo(gomega.BeNil())
+ gomega.Ω(appsInfo.State).To(gomega.Equal("Running"))
+
+ ginkgo.By("Verify that the pod's QOS class is BestEffort")
+ gomega.Ω(bestEffortPod.Status.QOSClass).To(gomega.Equal(v1.PodQOSBestEffort))
+
+ ginkgo.By("Verify that the pod's scheduler name is yunikorn")
+ gomega.Ω("yunikorn").To(gomega.Equal(bestEffortPod.Spec.SchedulerName))
+ allocation := appsInfo.Allocations[0]
+ gomega.Ω(allocation).NotTo(gomega.BeNil())
+ gomega.Ω(allocation.AllocationKey).NotTo(gomega.BeNil())
+ gomega.Ω(allocation.NodeID).NotTo(gomega.BeNil())
+ gomega.Ω(allocation.ApplicationID).To(gomega.Equal(bestEffortPod.ObjectMeta.Labels["applicationId"]))
+ core := bestEffortPod.Spec.Containers[0].Resources.Requests.Cpu().MilliValue()
+ mem := bestEffortPod.Spec.Containers[0].Resources.Requests.Memory().Value()
+ resMap := allocation.ResourcePerAlloc
+ Ω(len(resMap)).NotTo(gomega.BeZero())
+ Ω(resMap["memory"]).To(gomega.Equal(mem))
+ Ω(resMap["vcore"]).To(gomega.Equal(core))
+ })
+
+ ginkgo.It("Verify_NonBestEffort_QOS_Pod_Scheduling", func() {
+ ginkgo.By("Create a pod with QOS class set to Burstable")
+ burstablePodConfig := k8s.SleepPodConfig{Name: "burstablepod", NS: dev, QOSClass: v1.PodQOSBurstable}
+ initPod, podErr := k8s.InitSleepPod(burstablePodConfig)
+ gomega.Ω(podErr).NotTo(gomega.HaveOccurred())
+ burstablePod, err := kClient.CreatePod(initPod, dev)
+ gomega.Ω(err).NotTo(gomega.HaveOccurred())
+
+ ginkgo.By("Wait for the pod to move to running state")
+ err = kClient.WaitForPodRunning(dev, burstablePodConfig.Name, 30*time.Second)
+ gomega.Ω(err).NotTo(gomega.HaveOccurred())
+
+ ginkgo.By("Verify that the pod is scheduled and running")
+ appsInfo, err = restClient.GetAppInfo("default", "root."+dev, burstablePod.ObjectMeta.Labels["applicationId"])
+ gomega.Ω(err).NotTo(gomega.HaveOccurred())
+ gomega.Ω(appsInfo).NotTo(gomega.BeNil())
+ gomega.Ω(appsInfo.State).To(gomega.Equal("Running"))
+
+ ginkgo.By("Verify that the pod's QOS class is not BestEffort")
+ gomega.Ω(burstablePod.Status.QOSClass).NotTo(gomega.Equal(v1.PodQOSBestEffort))
+
+ ginkgo.By("Verify that the pod's scheduler name is yunikorn")
+ gomega.Ω("yunikorn").To(gomega.Equal(burstablePod.Spec.SchedulerName))
+ allocation := appsInfo.Allocations[0]
+ gomega.Ω(allocation).NotTo(gomega.BeNil())
+ gomega.Ω(allocation.AllocationKey).NotTo(gomega.BeNil())
+ gomega.Ω(allocation.NodeID).NotTo(gomega.BeNil())
+ gomega.Ω(allocation.ApplicationID).To(gomega.Equal(burstablePod.ObjectMeta.Labels["applicationId"]))
+ core := burstablePod.Spec.Containers[0].Resources.Requests.Cpu().MilliValue()
+ mem := burstablePod.Spec.Containers[0].Resources.Requests.Memory().Value()
+ resMap := allocation.ResourcePerAlloc
+ Ω(len(resMap)).NotTo(gomega.BeZero())
+ Ω(resMap["memory"]).To(gomega.Equal(mem))
+ Ω(resMap["vcore"]).To(gomega.Equal(core))
+ })
+
ginkgo.AfterEach(func() {
tests.DumpClusterInfoIfSpecFailed(suiteName, []string{dev})
// call the healthCheck api to check scheduler health
diff --git a/test/e2e/framework/helpers/k8s/pod_conf.go b/test/e2e/framework/helpers/k8s/pod_conf.go
index 7ea68ccc..6f54060d 100644
--- a/test/e2e/framework/helpers/k8s/pod_conf.go
+++ b/test/e2e/framework/helpers/k8s/pod_conf.go
@@ -41,6 +41,7 @@ type SleepPodConfig struct {
RequiredNode string
Optedout AllowPreemptOpted
Labels map[string]string
+ QOSClass v1.PodQOSClass
}
type AllowPreemptOpted int
@@ -126,12 +127,17 @@ func InitSleepPod(conf SleepPodConfig) (*v1.Pod, error) {
Command: []string{"sleep", strconv.Itoa(conf.Time)},
Annotations: annotation,
Labels: labels,
- Resources: &v1.ResourceRequirements{
- Requests: v1.ResourceList{
- "cpu": resource.MustParse(strconv.FormatInt(conf.CPU, 10) + "m"),
- "memory": resource.MustParse(strconv.FormatInt(conf.Mem, 10) + "M"),
- },
- },
+ Resources: func() *v1.ResourceRequirements {
+ if conf.QOSClass != v1.PodQOSBestEffort {
+ return &v1.ResourceRequirements{
+ Requests: v1.ResourceList{
+ "cpu": resource.MustParse(strconv.FormatInt(conf.CPU, 10) + "m"),
+ "memory": resource.MustParse(strconv.FormatInt(conf.Mem, 10) + "M"),
+ },
+ }
+ }
+ return nil
+ }(),
Affinity: affinity,
OwnerReferences: owners,
}
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]