This is an automated email from the ASF dual-hosted git repository.
chenyulin0719 pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/yunikorn-k8shim.git
The following commit(s) were added to refs/heads/master by this push:
new 368330e7 [YUNIKORN-3063] Refactor simple_preemptor E2E test suite structure (#971)
368330e7 is described below
commit 368330e7b62a283d762c748cec1e8ae6cfdda096
Author: Alex Wu <[email protected]>
AuthorDate: Mon Jan 19 00:14:02 2026 +0800
[YUNIKORN-3063] Refactor simple_preemptor E2E test suite structure (#971)
Signed-off-by: Alex Wu <[email protected]>
Closes: #971
Signed-off-by: Yu-Lin Chen <[email protected]>
---
.../simple_preemptor_suite_test.go | 88 +++++++++++++++++++
test/e2e/simple_preemptor/simple_preemptor_test.go | 99 ++--------------------
2 files changed, 96 insertions(+), 91 deletions(-)
diff --git a/test/e2e/simple_preemptor/simple_preemptor_suite_test.go b/test/e2e/simple_preemptor/simple_preemptor_suite_test.go
index d0372bf6..acbb143d 100644
--- a/test/e2e/simple_preemptor/simple_preemptor_suite_test.go
+++ b/test/e2e/simple_preemptor/simple_preemptor_suite_test.go
@@ -20,6 +20,8 @@ package simple_preemptor_test
import (
"path/filepath"
+ "runtime"
+ "strings"
"testing"
"github.com/onsi/ginkgo/v2"
@@ -27,6 +29,12 @@ import (
"github.com/onsi/gomega"
"github.com/apache/yunikorn-k8shim/test/e2e/framework/configmanager"
+ "github.com/apache/yunikorn-k8shim/test/e2e/framework/helpers/common"
+ "github.com/apache/yunikorn-k8shim/test/e2e/framework/helpers/k8s"
+ "github.com/apache/yunikorn-k8shim/test/e2e/framework/helpers/yunikorn"
+
+ v1 "k8s.io/api/core/v1"
+ "k8s.io/apimachinery/pkg/api/resource"
)
func init() {
@@ -46,6 +54,86 @@ func TestSimplePreemptor(t *testing.T) {
ginkgo.RunSpecs(t, "TestSimplePreemptor", ginkgo.Label("TestSimplePreemptor"))
}
+var _ = ginkgo.BeforeSuite(func() {
+ _, filename, _, _ := runtime.Caller(0)
+ suiteName = common.GetSuiteName(filename)
+ // Initializing kubectl client
+ kClient = k8s.KubeCtl{}
+ Ω(kClient.SetClient()).To(gomega.BeNil())
+ // Initializing rest client
+ restClient = yunikorn.RClient{}
+ Ω(restClient).NotTo(gomega.BeNil())
+
+ yunikorn.EnsureYuniKornConfigsPresent()
+ yunikorn.UpdateConfigMapWrapper(oldConfigMap, "")
+
+ ginkgo.By("Port-forward the scheduler pod")
+ var err = kClient.PortForwardYkSchedulerPod()
+ Ω(err).NotTo(gomega.HaveOccurred())
+
+ var nodes *v1.NodeList
+ nodes, err = kClient.GetNodes()
+ Ω(err).NotTo(gomega.HaveOccurred())
+ Ω(len(nodes.Items)).NotTo(gomega.BeZero(), "Events cant be empty")
+
+ // Extract node allocatable resources
+ for _, node := range nodes.Items {
+ // skip master if it's marked as such
+ node := node
+ if k8s.IsMasterNode(&node) || !k8s.IsComputeNode(&node) {
+ continue
+ }
+ if Worker1 == "" {
+ Worker1 = node.Name
+ Worker1Res = node.Status.Allocatable.Memory()
+ } else if Worker2 == "" {
+ Worker2 = node.Name
+ Worker2Res = node.Status.Allocatable.Memory()
+ } else {
+ nodesToTaint = append(nodesToTaint, node.Name)
+ }
+ }
+ ginkgo.By("Worker1:" + Worker1)
+ ginkgo.By("Worker2:" + Worker2)
+
+ ginkgo.By("Tainting some nodes..")
+ err = kClient.TaintNodes(nodesToTaint, taintKey, "value", v1.TaintEffectNoSchedule)
+ Ω(err).NotTo(gomega.HaveOccurred())
+
+ var pods *v1.PodList
+ totalPodQuantity1 := *resource.NewQuantity(0, resource.DecimalSI)
+ totalPodQuantity2 := *resource.NewQuantity(0, resource.DecimalSI)
+ pods, err = kClient.GetPods("yunikorn")
+ if err == nil {
+ for _, pod := range pods.Items {
+ for _, c := range pod.Spec.Containers {
+ switch pod.Spec.NodeName {
+ case Worker1:
+ totalPodQuantity1.Add(*resource.NewQuantity(c.Resources.Requests.Memory().Value(), resource.DecimalSI))
+ case Worker2:
+ totalPodQuantity2.Add(*resource.NewQuantity(c.Resources.Requests.Memory().Value(), resource.DecimalSI))
+ }
+ }
+ }
+ }
+ Worker1Res.Sub(totalPodQuantity1)
+ sleepPodMemLimit1 = int64(float64(Worker1Res.Value())/3.5) / (1000 * 1000)
+ Worker2Res.Sub(totalPodQuantity2)
+ sleepPodMemLimit2 = int64(float64(Worker2Res.Value())/3.5) / (1000 * 1000)
+})
+
+var _ = ginkgo.AfterSuite(func() {
+ ginkgo.By("Untainting some nodes")
+ err := kClient.UntaintNodes(nodesToTaint, taintKey)
+ Ω(err).NotTo(gomega.HaveOccurred(), "Could not remove taint from nodes "+strings.Join(nodesToTaint, ","))
+
+ ginkgo.By("Check Yunikorn's health")
+ checks, err := yunikorn.GetFailedHealthChecks()
+ Ω(err).NotTo(gomega.HaveOccurred())
+ Ω(checks).To(gomega.Equal(""), checks)
+ yunikorn.RestoreConfigMapWrapper(oldConfigMap)
+})
+
var Ω = gomega.Ω
var HaveOccurred = gomega.HaveOccurred
var dev string
diff --git a/test/e2e/simple_preemptor/simple_preemptor_test.go b/test/e2e/simple_preemptor/simple_preemptor_test.go
index eb7d356c..5613eccb 100644
--- a/test/e2e/simple_preemptor/simple_preemptor_test.go
+++ b/test/e2e/simple_preemptor/simple_preemptor_test.go
@@ -20,8 +20,6 @@ package simple_preemptor_test
import (
"fmt"
- "runtime"
- "strings"
"time"
tests "github.com/apache/yunikorn-k8shim/test/e2e"
@@ -52,96 +50,15 @@ var sleepPodMemLimit2 int64
var taintKey = "e2e_test_simple_preemptor"
var nodesToTaint []string
-var _ = ginkgo.BeforeSuite(func() {
- _, filename, _, _ := runtime.Caller(0)
- suiteName = common.GetSuiteName(filename)
- // Initializing kubectl client
- kClient = k8s.KubeCtl{}
- Ω(kClient.SetClient()).To(gomega.BeNil())
- // Initializing rest client
- restClient = yunikorn.RClient{}
- Ω(restClient).NotTo(gomega.BeNil())
-
- yunikorn.EnsureYuniKornConfigsPresent()
- yunikorn.UpdateConfigMapWrapper(oldConfigMap, "")
-
- ginkgo.By("Port-forward the scheduler pod")
- var err = kClient.PortForwardYkSchedulerPod()
- Ω(err).NotTo(gomega.HaveOccurred())
-
- var nodes *v1.NodeList
- nodes, err = kClient.GetNodes()
- Ω(err).NotTo(gomega.HaveOccurred())
- Ω(len(nodes.Items)).NotTo(gomega.BeZero(), "Events cant be empty")
-
- // Extract node allocatable resources
- for _, node := range nodes.Items {
- // skip master if it's marked as such
- node := node
- if k8s.IsMasterNode(&node) || !k8s.IsComputeNode(&node) {
- continue
- }
- if Worker1 == "" {
- Worker1 = node.Name
- Worker1Res = node.Status.Allocatable.Memory()
- } else if Worker2 == "" {
- Worker2 = node.Name
- Worker2Res = node.Status.Allocatable.Memory()
- } else {
- nodesToTaint = append(nodesToTaint, node.Name)
- }
- }
- ginkgo.By("Worker1:" + Worker1)
- ginkgo.By("Worker2:" + Worker2)
-
- ginkgo.By("Tainting some nodes..")
- err = kClient.TaintNodes(nodesToTaint, taintKey, "value", v1.TaintEffectNoSchedule)
- Ω(err).NotTo(gomega.HaveOccurred())
-
- var pods *v1.PodList
- totalPodQuantity1 := *resource.NewQuantity(0, resource.DecimalSI)
- totalPodQuantity2 := *resource.NewQuantity(0, resource.DecimalSI)
- pods, err = kClient.GetPods("yunikorn")
- if err == nil {
- for _, pod := range pods.Items {
- for _, c := range pod.Spec.Containers {
- switch pod.Spec.NodeName {
- case Worker1:
- totalPodQuantity1.Add(*resource.NewQuantity(c.Resources.Requests.Memory().Value(), resource.DecimalSI))
- case Worker2:
- totalPodQuantity2.Add(*resource.NewQuantity(c.Resources.Requests.Memory().Value(), resource.DecimalSI))
- }
- }
- }
- }
- Worker1Res.Sub(totalPodQuantity1)
- sleepPodMemLimit1 = int64(float64(Worker1Res.Value())/3.5) / (1000 * 1000)
- Worker2Res.Sub(totalPodQuantity2)
- sleepPodMemLimit2 = int64(float64(Worker2Res.Value())/3.5) / (1000 * 1000)
-})
-
-var _ = ginkgo.BeforeEach(func() {
- dev = "dev" + common.RandSeq(5)
- ginkgo.By("create development namespace")
- ns, err := kClient.CreateNamespace(dev, nil)
- gomega.Ω(err).NotTo(gomega.HaveOccurred())
- gomega.Ω(ns.Status.Phase).To(gomega.Equal(v1.NamespaceActive))
-})
-
-var _ = ginkgo.AfterSuite(func() {
-
- ginkgo.By("Untainting some nodes")
- err := kClient.UntaintNodes(nodesToTaint, taintKey)
- Ω(err).NotTo(gomega.HaveOccurred(), "Could not remove taint from nodes "+strings.Join(nodesToTaint, ","))
-
- ginkgo.By("Check Yunikorn's health")
- checks, err := yunikorn.GetFailedHealthChecks()
- Ω(err).NotTo(gomega.HaveOccurred())
- Ω(checks).To(gomega.Equal(""), checks)
- yunikorn.RestoreConfigMapWrapper(oldConfigMap)
-})
-
var _ = ginkgo.Describe("SimplePreemptor", func() {
+ ginkgo.BeforeEach(func() {
+ dev = "dev" + common.RandSeq(5)
+ ginkgo.By("create development namespace")
+ ns, err := kClient.CreateNamespace(dev, nil)
+ gomega.Ω(err).NotTo(gomega.HaveOccurred())
+ gomega.Ω(ns.Status.Phase).To(gomega.Equal(v1.NamespaceActive))
+ })
+
ginkgo.It("Verify_basic_simple_preemption", func() {
// Use case: Only one pod is running and same pod has been selected as victim
// Define sleepPod
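Note: to exercise only this refactored suite locally, one option (a sketch, assuming a cluster with YuniKorn already deployed, a valid kubeconfig, and any extra flags your e2e setup requires) is the standard Go test invocation, filtering on the Ginkgo v2 label registered in RunSpecs above:

    go test ./test/e2e/simple_preemptor/... -v -ginkgo.label-filter=TestSimplePreemptor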
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]