This is an automated email from the ASF dual-hosted git repository.
mani pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/yunikorn-k8shim.git
The following commit(s) were added to refs/heads/master by this push:
new fba2d0a7 [YUNIKORN-3229] Update go version to 1.26 (#1003)
fba2d0a7 is described below
commit fba2d0a7e10b9d8a0d1d2d516879e5d3615284d0
Author: Aditya Maheshwari <[email protected]>
AuthorDate: Mon Mar 2 11:54:54 2026 +0530
[YUNIKORN-3229] Update go version to 1.26 (#1003)
Closes: #1003
Signed-off-by: Manikandan R <[email protected]>
---
.go_version | 2 +-
Makefile | 8 +--
test/e2e/basic_scheduling/basic_scheduling_test.go | 6 +--
test/e2e/bin_packing/bin_packing_suite_test.go | 4 +-
test/e2e/bin_packing/bin_packing_test.go | 2 +-
test/e2e/configmap/configmap_suite_test.go | 3 +-
test/e2e/configmap/configmap_test.go | 10 ++--
test/e2e/foreign_pod/foreign_pod_suite_test.go | 1 +
test/e2e/foreign_pod/foreign_pod_test.go | 4 +-
.../framework/helpers/yunikorn/rest_api_utils.go | 2 +
test/e2e/framework/helpers/yunikorn/wrappers.go | 9 ++--
.../gang_scheduling/gang_scheduling_suite_test.go | 1 +
test/e2e/gang_scheduling/gang_scheduling_test.go | 58 +++++++++++-----------
.../node_resources/node_resources_suite_test.go | 1 +
test/e2e/node_resources/node_resources_test.go | 2 +-
.../persistent_volume_suite_test.go | 2 +-
.../pod_resource_scaling_suite_test.go | 2 +-
test/e2e/predicates/predicates_suite_test.go | 1 +
test/e2e/predicates/predicates_test.go | 2 +-
test/e2e/preemption/preemption_suite_test.go | 4 +-
.../priority_scheduling_suite_test.go | 5 +-
.../queue_quota_mgmt_suite_test.go | 1 +
test/e2e/queue_quota_mgmt/queue_quota_mgmt_test.go | 2 +-
.../recovery_and_restart_suite_test.go | 2 +-
.../recovery_and_restart_test.go | 8 +--
.../resource_fairness_suite_test.go | 3 +-
.../resource_fairness/resource_fairness_test.go | 4 +-
.../restart_changed_config_suite_test.go | 2 +-
.../simple_preemptor_suite_test.go | 4 +-
.../spark_jobs_scheduling_suite_test.go | 1 +
.../spark_jobs_scheduling_test.go | 2 +-
.../user_group_limit_suite_test.go | 2 +-
32 files changed, 87 insertions(+), 73 deletions(-)
diff --git a/.go_version b/.go_version
index 5e2b9500..5ff8c4f5 100644
--- a/.go_version
+++ b/.go_version
@@ -1 +1 @@
-1.25
+1.26.0
diff --git a/Makefile b/Makefile
index 5f7b522e..5bc6d6a9 100644
--- a/Makefile
+++ b/Makefile
@@ -27,7 +27,7 @@ GO_EXE_PATH := $(GOROOT)/bin
# Check if this GO tools version used is at least the version of go specified
in
# the go.mod file. The version in go.mod should be in sync with other repos.
GO_VERSION := $(shell "$(GO)" version | awk '{print substr($$3, 3, 4)}')
-MOD_VERSION := $(shell cat .go_version)
+MOD_VERSION := $(shell cat .go_version)
GM := $(word 1,$(subst ., ,$(GO_VERSION)))
MM := $(word 1,$(subst ., ,$(MOD_VERSION)))
@@ -79,7 +79,7 @@ endif
ifeq ($(PLUGIN),1)
PLUGIN_OPTS := --plugin
else
- PLUGIN_OPTS :=
+ PLUGIN_OPTS :=
endif
# Reproducible builds mode
@@ -163,7 +163,7 @@ endif
export PATH := $(BASE_DIR)/$(SHELLCHECK_PATH):$(PATH)
# golangci-lint
-GOLANGCI_LINT_VERSION=2.5.0
+GOLANGCI_LINT_VERSION=2.10.1
GOLANGCI_LINT_PATH=$(TOOLS_DIR)/golangci-lint-v$(GOLANGCI_LINT_VERSION)
GOLANGCI_LINT_BIN=$(GOLANGCI_LINT_PATH)/golangci-lint
GOLANGCI_LINT_ARCHIVE=golangci-lint-$(GOLANGCI_LINT_VERSION)-$(OS)-$(EXEC_ARCH).tar.gz
@@ -514,7 +514,7 @@ $(COVERAGE_DIR)/$(PLUGIN_BINARY): go.mod go.sum $(shell
find pkg)
-ldflags '-buildid= -extldflags "-static" -X
${FLAG_PREFIX}.buildVersion=${VERSION} -X ${FLAG_PREFIX}.buildDate=${DATE} -X
${FLAG_PREFIX}.isPluginVersion=true -X ${FLAG_PREFIX}.goVersion=${GO_VERSION}
-X ${FLAG_PREFIX}.arch=${EXEC_ARCH} -X ${FLAG_PREFIX}.coreSHA=${CORE_SHA} -X
${FLAG_PREFIX}.siSHA=${SI_SHA} -X ${FLAG_PREFIX}.shimSHA=${SHIM_SHA}' \
-tags netgo \
./pkg/cmd/schedulerplugin/
-
+
# Build a scheduler image based on the production ready version
.PHONY: sched_image
sched_image: $(OUTPUT)/third-party-licenses.md scheduler docker/scheduler
diff --git a/test/e2e/basic_scheduling/basic_scheduling_test.go
b/test/e2e/basic_scheduling/basic_scheduling_test.go
index 7ee27ed7..eae5b61c 100644
--- a/test/e2e/basic_scheduling/basic_scheduling_test.go
+++ b/test/e2e/basic_scheduling/basic_scheduling_test.go
@@ -85,7 +85,7 @@ var _ = ginkgo.Describe("", func() {
core :=
sleepRespPod.Spec.Containers[0].Resources.Requests.Cpu().MilliValue()
mem :=
sleepRespPod.Spec.Containers[0].Resources.Requests.Memory().Value()
resMap := allocation.ResourcePerAlloc
- Ω(len(resMap)).NotTo(gomega.BeZero())
+ Ω(resMap).NotTo(gomega.BeEmpty())
Ω(resMap["memory"]).To(gomega.Equal(mem))
Ω(resMap["vcore"]).To(gomega.Equal(core))
})
@@ -121,7 +121,7 @@ var _ = ginkgo.Describe("", func() {
core :=
bestEffortPod.Spec.Containers[0].Resources.Requests.Cpu().MilliValue()
mem :=
bestEffortPod.Spec.Containers[0].Resources.Requests.Memory().Value()
resMap := allocation.ResourcePerAlloc
- Ω(len(resMap)).NotTo(gomega.BeZero())
+ Ω(resMap).NotTo(gomega.BeEmpty())
Ω(resMap["memory"]).To(gomega.Equal(mem))
Ω(resMap["vcore"]).To(gomega.Equal(core))
})
@@ -157,7 +157,7 @@ var _ = ginkgo.Describe("", func() {
core :=
burstablePod.Spec.Containers[0].Resources.Requests.Cpu().MilliValue()
mem :=
burstablePod.Spec.Containers[0].Resources.Requests.Memory().Value()
resMap := allocation.ResourcePerAlloc
- Ω(len(resMap)).NotTo(gomega.BeZero())
+ Ω(resMap).NotTo(gomega.BeEmpty())
Ω(resMap["memory"]).To(gomega.Equal(mem))
Ω(resMap["vcore"]).To(gomega.Equal(core))
})
diff --git a/test/e2e/bin_packing/bin_packing_suite_test.go
b/test/e2e/bin_packing/bin_packing_suite_test.go
index ae2b492b..d632838b 100644
--- a/test/e2e/bin_packing/bin_packing_suite_test.go
+++ b/test/e2e/bin_packing/bin_packing_suite_test.go
@@ -62,7 +62,7 @@ var _ = BeforeSuite(func() {
_, filename, _, _ := runtime.Caller(0)
suiteName = common.GetSuiteName(filename)
- Ω(kClient.SetClient()).To(BeNil())
+ Ω(kClient.SetClient()).To(Succeed())
/* Sample configMap. Post-update, Yunikorn will use binpacking node
sort and fair app sort
partitions:
- name: default
@@ -128,3 +128,5 @@ var Ω = gomega.Expect
var BeNil = gomega.BeNil
var BeNumerically = gomega.BeNumerically
var HaveOccurred = gomega.HaveOccurred
+var HaveLen = gomega.HaveLen
+var Succeed = gomega.Succeed
diff --git a/test/e2e/bin_packing/bin_packing_test.go
b/test/e2e/bin_packing/bin_packing_test.go
index 43b8ec6c..d471783b 100644
--- a/test/e2e/bin_packing/bin_packing_test.go
+++ b/test/e2e/bin_packing/bin_packing_test.go
@@ -183,7 +183,7 @@ var _ = Describe("", func() {
jobPods, lstErr := kClient.ListPods(ns,
fmt.Sprintf("job-name=%s", jobConf.Name))
Ω(lstErr).NotTo(HaveOccurred())
Ω(jobPods).NotTo(BeNil())
- Ω(len(jobPods.Items)).Should(Equal(int(3)), "Pods count
should be 3")
+ Ω(jobPods.Items).To(HaveLen(int(3)), "Pods count should
be 3")
for _, pod := range jobPods.Items {
Ω(pod.Spec.NodeName).To(Equal(sortedWorkerNodes[i].Name),
"job pods not scheduled to correct
node")
diff --git a/test/e2e/configmap/configmap_suite_test.go
b/test/e2e/configmap/configmap_suite_test.go
index 8224999a..b8108c80 100644
--- a/test/e2e/configmap/configmap_suite_test.go
+++ b/test/e2e/configmap/configmap_suite_test.go
@@ -63,7 +63,7 @@ var _ = BeforeSuite(func() {
_, filename, _, _ := runtime.Caller(0)
suiteName = common.GetSuiteName(filename)
kClient = k8s.KubeCtl{}
- Ω(kClient.SetClient()).To(BeNil())
+ Ω(kClient.SetClient()).To(Succeed())
restClient = yunikorn.RClient{}
Ω(restClient).NotTo(BeNil())
@@ -90,3 +90,4 @@ var Equal = gomega.Equal
var Ω = gomega.Expect
var BeNil = gomega.BeNil
var HaveOccurred = gomega.HaveOccurred
+var Succeed = gomega.Succeed
diff --git a/test/e2e/configmap/configmap_test.go
b/test/e2e/configmap/configmap_test.go
index 6f3649c9..3d08b305 100644
--- a/test/e2e/configmap/configmap_test.go
+++ b/test/e2e/configmap/configmap_test.go
@@ -63,8 +63,8 @@ var _ = Describe("ConfigMap", func() {
schedulerConfig, err :=
configs.LoadSchedulerConfigFromByteArray([]byte(queues))
Ω(err).NotTo(HaveOccurred())
- Ω(len(schedulerConfig.Partitions)).To(Equal(1))
- Ω(len(schedulerConfig.Partitions[0].Queues)).To(Equal(1))
+ Ω(schedulerConfig.Partitions).To(gomega.HaveLen(1))
+ Ω(schedulerConfig.Partitions[0].Queues).To(gomega.HaveLen(1))
ts :=
schedulerConfig.Partitions[0].Queues[0].Properties["timestamp"]
err = yunikorn.WaitForQueueTS("root", ts, 30*time.Second)
@@ -84,7 +84,7 @@ var _ = Describe("ConfigMap", func() {
Ω(err).NotTo(HaveOccurred())
queuesGz :=
configMap.BinaryData[configmanager.DefaultPolicyGroup+".gz"]
- Ω(len(queuesGz)).NotTo(Equal(0))
+ Ω(queuesGz).ToNot(gomega.BeEmpty())
gzReader, err := gzip.NewReader(bytes.NewReader(queuesGz))
Ω(err).NotTo(HaveOccurred())
decompressedBytes, err := io.ReadAll(gzReader)
@@ -94,8 +94,8 @@ var _ = Describe("ConfigMap", func() {
schedulerConfig, err :=
configs.LoadSchedulerConfigFromByteArray(decompressedBytes)
Ω(err).NotTo(HaveOccurred())
- Ω(len(schedulerConfig.Partitions)).To(Equal(1))
- Ω(len(schedulerConfig.Partitions[0].Queues)).To(Equal(1))
+ Ω(schedulerConfig.Partitions).To(gomega.HaveLen(1))
+ Ω(schedulerConfig.Partitions[0].Queues).To(gomega.HaveLen(1))
ts :=
schedulerConfig.Partitions[0].Queues[0].Properties["timestamp"]
err = yunikorn.WaitForQueueTS("root", ts, 30*time.Second)
diff --git a/test/e2e/foreign_pod/foreign_pod_suite_test.go
b/test/e2e/foreign_pod/foreign_pod_suite_test.go
index e5a87cae..5ec84247 100644
--- a/test/e2e/foreign_pod/foreign_pod_suite_test.go
+++ b/test/e2e/foreign_pod/foreign_pod_suite_test.go
@@ -87,3 +87,4 @@ var BeNumerically = gomega.BeNumerically
var Ω = gomega.Expect
var BeNil = gomega.BeNil
var HaveOccurred = gomega.HaveOccurred
+var Succeed = gomega.Succeed
diff --git a/test/e2e/foreign_pod/foreign_pod_test.go
b/test/e2e/foreign_pod/foreign_pod_test.go
index 932afbfd..ab2dc1ba 100644
--- a/test/e2e/foreign_pod/foreign_pod_test.go
+++ b/test/e2e/foreign_pod/foreign_pod_test.go
@@ -35,7 +35,7 @@ var _ = Describe("", func() {
It("Verify foreign pod tracking", func() {
By("Retrieving foreign pods from kube-system")
kClient = k8s.KubeCtl{}
- Ω(kClient.SetClient()).To(BeNil())
+ Ω(kClient.SetClient()).To(Succeed())
podList, err := kClient.GetPods(kubeSystem)
Ω(err).NotTo(gomega.HaveOccurred())
@@ -67,7 +67,7 @@ var _ = Describe("", func() {
// check that all UIDs from kube-system are tracked properly
for uid := range kubeUIDs {
- Ω(foreignAllocs[uid]).To(Equal(true), "pod %s from
kube-system is not tracked in Yunikorn", uid)
+ Ω(foreignAllocs[uid]).To(gomega.BeTrue(), "pod %s from
kube-system is not tracked in Yunikorn", uid)
Ω(foreignNodes[uid]).To(Equal(kubeNodes[uid]), "pod %s
is tracked under incorrect node", uid)
}
})
diff --git a/test/e2e/framework/helpers/yunikorn/rest_api_utils.go
b/test/e2e/framework/helpers/yunikorn/rest_api_utils.go
index 5be30f2f..ee582658 100644
--- a/test/e2e/framework/helpers/yunikorn/rest_api_utils.go
+++ b/test/e2e/framework/helpers/yunikorn/rest_api_utils.go
@@ -78,6 +78,7 @@ func (c *RClient) newRequest(method, path string, body
interface{}) (*http.Reque
return req, nil
}
func (c *RClient) do(req *http.Request, v interface{}) (*http.Response, error)
{
+ //nolint:gosec // safe to ignore as these are tests.
resp, err := c.httpClient.Do(req)
if err != nil {
return nil, err
@@ -98,6 +99,7 @@ func (c *RClient) do(req *http.Request, v interface{})
(*http.Response, error) {
}
func (c *RClient) getBody(req *http.Request) (string, error) {
+ //nolint:gosec // safe to ignore as these are tests.
resp, err := c.httpClient.Do(req)
if err != nil {
return "", err
diff --git a/test/e2e/framework/helpers/yunikorn/wrappers.go
b/test/e2e/framework/helpers/yunikorn/wrappers.go
index 26a9237d..567509d3 100644
--- a/test/e2e/framework/helpers/yunikorn/wrappers.go
+++ b/test/e2e/framework/helpers/yunikorn/wrappers.go
@@ -39,7 +39,7 @@ import (
var k = k8s.KubeCtl{}
func EnsureYuniKornConfigsPresent() {
- Ω(k.SetClient()).To(BeNil())
+ Ω(k.SetClient()).To(Succeed())
By("Create initial configMap if not exists")
exists, err := k.ConfigMapExists(constants.ConfigMapName,
configmanager.YuniKornTestConfig.YkNamespace)
Ω(err).NotTo(HaveOccurred())
@@ -74,7 +74,7 @@ func UpdateCustomConfigMapWrapper(oldConfigMap *v1.ConfigMap,
schedPolicy string
}
func UpdateCustomConfigMapWrapperWithMap(oldConfigMap *v1.ConfigMap,
schedPolicy string, customMap map[string]string, mutator func(sc
*configs.SchedulerConfig) error) {
- Ω(k.SetClient()).To(BeNil())
+ Ω(k.SetClient()).To(Succeed())
By("Port-forward the scheduler pod")
fwdErr := k.PortForwardYkSchedulerPod()
Ω(fwdErr).NotTo(HaveOccurred())
@@ -82,7 +82,7 @@ func UpdateCustomConfigMapWrapperWithMap(oldConfigMap
*v1.ConfigMap, schedPolicy
By("Enabling new scheduling config")
// Save old configMap
- Ω(k.SetClient()).To(BeNil())
+ Ω(k.SetClient()).To(Succeed())
var c, err =
k.GetConfigMaps(configmanager.YuniKornTestConfig.YkNamespace,
configmanager.DefaultYuniKornConfigMap)
Ω(err).NotTo(HaveOccurred())
@@ -122,7 +122,7 @@ func UpdateCustomConfigMapWrapperWithMap(oldConfigMap
*v1.ConfigMap, schedPolicy
}
func RestoreConfigMapWrapper(oldConfigMap *v1.ConfigMap) {
- Ω(k.SetClient()).To(BeNil())
+ Ω(k.SetClient()).To(Succeed())
By("Restoring the old config maps")
var c, err =
k.GetConfigMaps(configmanager.YuniKornTestConfig.YkNamespace,
configmanager.DefaultYuniKornConfigMap)
@@ -168,3 +168,4 @@ var Ω = gomega.Expect
var BeNil = gomega.BeNil
var HaveOccurred = gomega.HaveOccurred
var BeEquivalentTo = gomega.BeEquivalentTo
+var Succeed = gomega.Succeed
diff --git a/test/e2e/gang_scheduling/gang_scheduling_suite_test.go
b/test/e2e/gang_scheduling/gang_scheduling_suite_test.go
index 1f7bbc04..465592aa 100644
--- a/test/e2e/gang_scheduling/gang_scheduling_suite_test.go
+++ b/test/e2e/gang_scheduling/gang_scheduling_suite_test.go
@@ -87,3 +87,4 @@ var BeNumerically = gomega.BeNumerically
var Ω = gomega.Expect
var BeNil = gomega.BeNil
var HaveOccurred = gomega.HaveOccurred
+var Succeed = gomega.Succeed
diff --git a/test/e2e/gang_scheduling/gang_scheduling_test.go
b/test/e2e/gang_scheduling/gang_scheduling_test.go
index b62a05af..57cbbe1d 100644
--- a/test/e2e/gang_scheduling/gang_scheduling_test.go
+++ b/test/e2e/gang_scheduling/gang_scheduling_test.go
@@ -63,7 +63,7 @@ var (
var _ = Describe("", func() {
BeforeEach(func() {
kClient = k8s.KubeCtl{}
- Ω(kClient.SetClient()).To(BeNil())
+ Ω(kClient.SetClient()).To(Succeed())
ns = "ns-" + common.RandSeq(10)
nsQueue = "root." + ns
@@ -185,7 +185,7 @@ var _ = Describe("", func() {
for i, tg := range annotations.TaskGroups {
jobPods, lstErr := kClient.ListPods(ns,
fmt.Sprintf("job-name=%s", realJobNames[i]))
Ω(lstErr).NotTo(HaveOccurred())
- Ω(len(jobPods.Items)).Should(BeNumerically("==",
tg.MinMember))
+ Ω(jobPods.Items).To(gomega.HaveLen(int(tg.MinMember)),
"Job pods count is not correct")
realPodNodes[tg.Name] = map[string]int{}
for _, pod := range jobPods.Items {
podRunErr := kClient.WaitForPodRunning(ns,
pod.Name, time.Minute*5)
@@ -223,7 +223,7 @@ var _ = Describe("", func() {
Ω(phErr).NotTo(HaveOccurred())
appDaoInfo, appDaoInfoErr :=
restClient.GetAppInfo(configmanager.DefaultPartition, nsQueue, appID)
Ω(appDaoInfoErr).NotTo(HaveOccurred())
- Ω(len(appDaoInfo.Allocations)).To(Equal(int(6)), "Allocations
count is not correct")
+ Ω(appDaoInfo.Allocations).To(gomega.HaveLen(int(6)),
"Allocations count is not correct")
})
// Test to verify soft GS style behaviour
@@ -258,11 +258,11 @@ var _ = Describe("", func() {
Ω(phErr).NotTo(HaveOccurred())
appDaoInfo, appDaoInfoErr :=
restClient.GetAppInfo(configmanager.DefaultPartition, nsQueue, appID)
Ω(appDaoInfoErr).NotTo(HaveOccurred())
- Ω(len(appDaoInfo.PlaceholderData)).To(Equal(2), "Placeholder
count is not correct")
- Ω(len(appDaoInfo.Allocations)).To(Equal(int(3)), "Allocations
count is not correct")
+ Ω(appDaoInfo.PlaceholderData).To(gomega.HaveLen(2),
"Placeholder count is not correct")
+ Ω(appDaoInfo.Allocations).To(gomega.HaveLen(int(3)),
"Allocations count is not correct")
for _, alloc := range appDaoInfo.Allocations {
- Ω(alloc.Placeholder).To(Equal(false), "Allocation
should be non placeholder")
- Ω(alloc.PlaceholderUsed).To(Equal(false), "Allocation
should not be replacement of ph")
+ Ω(alloc.Placeholder).To(gomega.BeFalse(), "Allocation
should be non placeholder")
+ Ω(alloc.PlaceholderUsed).To(gomega.BeFalse(),
"Allocation should not be replacement of ph")
}
})
@@ -295,7 +295,7 @@ var _ = Describe("", func() {
// Ensure placeholders are timed out and allocations count is
correct as app started running normal because of 'soft' gang style
appDaoInfo, appDaoInfoErr :=
restClient.GetCompletedAppInfo(configmanager.DefaultPartition, appID)
Ω(appDaoInfoErr).NotTo(HaveOccurred())
- Ω(len(appDaoInfo.PlaceholderData)).To(Equal(2), "Placeholder
count is not correct")
+ Ω(appDaoInfo.PlaceholderData).To(gomega.HaveLen(2),
"Placeholder count is not correct")
checkPlaceholderData(appDaoInfo, groupA, 3, 0, 3)
checkPlaceholderData(appDaoInfo, groupB, 3, 0, 3)
})
@@ -366,19 +366,19 @@ var _ = Describe("", func() {
appDaoInfo, appDaoInfoErr :=
restClient.GetAppInfo(configmanager.DefaultPartition, nsQueue, appIDA)
Ω(appDaoInfoErr).NotTo(HaveOccurred())
- Ω(len(appDaoInfo.Allocations)).To(Equal(0), "Allocations count
is not correct")
- Ω(len(appDaoInfo.PlaceholderData)).To(Equal(0), "Placeholder
count is not correct")
+ Ω(appDaoInfo.Allocations).To(gomega.BeEmpty(), "Allocations
count is not correct")
+ Ω(appDaoInfo.PlaceholderData).To(gomega.BeEmpty(), "Placeholder
count is not correct")
appDaoInfo, appDaoInfoErr =
restClient.GetAppInfo(configmanager.DefaultPartition, nsQueue, appIDB)
Ω(appDaoInfoErr).NotTo(HaveOccurred())
- Ω(len(appDaoInfo.Allocations)).To(Equal(3), "Allocations count
is not correct")
- Ω(len(appDaoInfo.PlaceholderData)).To(Equal(1), "Placeholder
count is not correct")
+ Ω(appDaoInfo.Allocations).To(gomega.HaveLen(3), "Allocations
count is not correct")
+ Ω(appDaoInfo.PlaceholderData).To(gomega.HaveLen(1),
"Placeholder count is not correct")
Ω(int(appDaoInfo.PlaceholderData[0].Count)).To(Equal(int(3)),
"Placeholder count is not correct")
appDaoInfo, appDaoInfoErr =
restClient.GetAppInfo(configmanager.DefaultPartition, nsQueue, appIDC)
Ω(appDaoInfoErr).NotTo(HaveOccurred())
- Ω(len(appDaoInfo.Allocations)).To(Equal(0), "Allocations count
is not correct")
- Ω(len(appDaoInfo.PlaceholderData)).To(Equal(0), "Placeholder
count is not correct")
+ Ω(appDaoInfo.Allocations).To(gomega.BeEmpty(), "Allocations
count is not correct")
+ Ω(appDaoInfo.PlaceholderData).To(gomega.BeEmpty(), "Placeholder
count is not correct")
})
// Test validates that lost placeholders resources are decremented by
Yunikorn.
@@ -395,7 +395,7 @@ var _ = Describe("", func() {
nodes, err := kClient.GetNodes()
Ω(err).NotTo(HaveOccurred())
workerNodes := k8s.GetWorkerNodes(*nodes)
- Ω(len(workerNodes)).NotTo(Equal(0))
+ Ω(workerNodes).NotTo(gomega.BeEmpty())
pdTimeout := 60
annotations := k8s.PodAnnotation{
@@ -440,7 +440,7 @@ var _ = Describe("", func() {
// Verify app allocations correctly decremented
appInfo, appErr :=
restClient.GetAppInfo(configmanager.DefaultPartition, nsQueue, appID)
Ω(appErr).NotTo(HaveOccurred())
- Ω(len(appInfo.Allocations)).To(Equal(0), "Placeholder
allocation not removed from app")
+ Ω(appInfo.Allocations).To(gomega.BeEmpty(), "Placeholder
allocation not removed from app")
// Verify no app allocation in nodeA
ykNodes, nodeErr :=
restClient.GetNodes(configmanager.DefaultPartition)
@@ -508,7 +508,7 @@ var _ = Describe("", func() {
By("Verify app allocation is empty")
appInfo, restErr :=
restClient.GetAppInfo(configmanager.DefaultPartition, nsQueue, appID)
Ω(restErr).NotTo(HaveOccurred())
- Ω(len(appInfo.Allocations)).To(Equal(0))
+ Ω(appInfo.Allocations).To(gomega.BeEmpty())
})
// Test to verify originator deletion will trigger placeholders cleanup
@@ -574,8 +574,8 @@ var _ = Describe("", func() {
Ω(phErr).NotTo(HaveOccurred())
appDaoInfo, appDaoInfoErr :=
restClient.GetAppInfo(configmanager.DefaultPartition, nsQueue, appID)
Ω(appDaoInfoErr).NotTo(HaveOccurred())
- Ω(len(appDaoInfo.PlaceholderData)).To(Equal(1), "Placeholder
count is not correct")
- Ω(len(appDaoInfo.Allocations)).To(Equal(int(3)), "Allocations
count is not correct")
+ Ω(appDaoInfo.PlaceholderData).To(gomega.HaveLen(1),
"Placeholder count is not correct")
+ Ω(appDaoInfo.Allocations).To(gomega.HaveLen(int(3)),
"Allocations count is not correct")
Ω(appDaoInfo.UsedResource[hugepageKey]).To(Equal(int64(314572800)), "Used huge
page resource is not correct")
})
@@ -621,11 +621,11 @@ var _ = Describe("", func() {
for _, alloc := range appDaoInfo.Allocations {
podName :=
alloc.AllocationTags["kubernetes.io/meta/podName"]
if podName == originator.Name {
- Ω(alloc.Originator).To(Equal(true), "Originator
pod should be a originator pod")
- Ω(alloc.Placeholder).To(Equal(false),
"Originator pod should not be a placeholder pod")
+ Ω(alloc.Originator).To(gomega.BeTrue(),
"Originator pod should be a originator pod")
+ Ω(alloc.Placeholder).To(gomega.BeFalse(),
"Originator pod should not be a placeholder pod")
} else {
- Ω(alloc.Originator).To(Equal(false),
"Placeholder pod should not be a originator pod")
- Ω(alloc.Placeholder).To(Equal(true),
"Placeholder pod should be a placeholder pod")
+ Ω(alloc.Originator).To(gomega.BeFalse(),
"Placeholder pod should not be a originator pod")
+ Ω(alloc.Placeholder).To(gomega.BeTrue(),
"Placeholder pod should be a placeholder pod")
}
}
@@ -646,11 +646,11 @@ var _ = Describe("", func() {
for _, alloc := range appDaoInfo.Allocations {
podName :=
alloc.AllocationTags["kubernetes.io/meta/podName"]
if podName == originator.Name {
- Ω(alloc.Originator).To(Equal(true), "Originator
pod should be a originator pod")
- Ω(alloc.Placeholder).To(Equal(false),
"Originator pod should not be a placeholder pod")
+ Ω(alloc.Originator).To(gomega.BeTrue(),
"Originator pod should be a originator pod")
+ Ω(alloc.Placeholder).To(gomega.BeFalse(),
"Originator pod should not be a placeholder pod")
} else {
- Ω(alloc.Originator).To(Equal(false),
"Placeholder pod should not be a originator pod")
- Ω(alloc.Placeholder).To(Equal(true),
"Placeholder pod should be a placeholder pod")
+ Ω(alloc.Originator).To(gomega.BeFalse(),
"Placeholder pod should not be a originator pod")
+ Ω(alloc.Placeholder).To(gomega.BeTrue(),
"Placeholder pod should be a placeholder pod")
}
}
})
@@ -777,7 +777,7 @@ func checkPlaceholderData(appDaoInfo
*dao.ApplicationDAOInfo, tgName string, cou
break
}
}
- Ω(verified).To(Equal(true), fmt.Sprintf("Can't find task group %s in
app info", tgName))
+ Ω(verified).To(gomega.BeTrue(), fmt.Sprintf("Can't find task group %s
in app info", tgName))
}
func getPlaceholderData(appDaoInfo *dao.ApplicationDAOInfo, tgName string)
(bool, int, int, int) {
@@ -881,5 +881,5 @@ func verifyOriginatorDeletionCase(withOwnerRef bool) {
By("Verify app allocation is empty")
appInfo, restErr :=
restClient.GetAppInfo(configmanager.DefaultPartition, nsQueue, appID)
Ω(restErr).NotTo(HaveOccurred())
- Ω(len(appInfo.Allocations)).To(BeNumerically("==", 0))
+ Ω(appInfo.Allocations).To(gomega.BeEmpty())
}
diff --git a/test/e2e/node_resources/node_resources_suite_test.go
b/test/e2e/node_resources/node_resources_suite_test.go
index 181be5e6..8f4bdf01 100644
--- a/test/e2e/node_resources/node_resources_suite_test.go
+++ b/test/e2e/node_resources/node_resources_suite_test.go
@@ -81,3 +81,4 @@ var Equal = gomega.Equal
var Ω = gomega.Expect
var BeNil = gomega.BeNil
var HaveOccurred = gomega.HaveOccurred
+var Succeed = gomega.Succeed
diff --git a/test/e2e/node_resources/node_resources_test.go
b/test/e2e/node_resources/node_resources_test.go
index 61c7689f..c67523d4 100644
--- a/test/e2e/node_resources/node_resources_test.go
+++ b/test/e2e/node_resources/node_resources_test.go
@@ -38,7 +38,7 @@ var _ = Describe("", func() {
BeforeEach(func() {
kClient = k8s.KubeCtl{}
- Ω(kClient.SetClient()).To(BeNil())
+ Ω(kClient.SetClient()).To(Succeed())
ns = "ns-" + common.RandSeq(10)
By(fmt.Sprintf("Creating namespace: %s", ns))
var ns1, err1 = kClient.CreateNamespace(ns, nil)
diff --git a/test/e2e/persistent_volume/persistent_volume_suite_test.go
b/test/e2e/persistent_volume/persistent_volume_suite_test.go
index 24c65c94..9aa390a1 100644
--- a/test/e2e/persistent_volume/persistent_volume_suite_test.go
+++ b/test/e2e/persistent_volume/persistent_volume_suite_test.go
@@ -59,7 +59,7 @@ var _ = ginkgo.BeforeSuite(func() {
suiteName = common.GetSuiteName(filename)
// Initializing kubectl client
kClient = k8s.KubeCtl{}
- Ω(kClient.SetClient()).To(gomega.BeNil())
+ Ω(kClient.SetClient()).To(gomega.Succeed())
// Initializing rest client
restClient = yunikorn.RClient{}
diff --git a/test/e2e/pod_resource_scaling/pod_resource_scaling_suite_test.go
b/test/e2e/pod_resource_scaling/pod_resource_scaling_suite_test.go
index 0049eb75..2fcae50b 100644
--- a/test/e2e/pod_resource_scaling/pod_resource_scaling_suite_test.go
+++ b/test/e2e/pod_resource_scaling/pod_resource_scaling_suite_test.go
@@ -58,7 +58,7 @@ var _ = ginkgo.BeforeSuite(func() {
// Initializing kubectl client
kClient = k8s.KubeCtl{}
- Ω(kClient.SetClient()).To(gomega.BeNil())
+ Ω(kClient.SetClient()).To(gomega.Succeed())
// Initializing rest client
restClient = yunikorn.RClient{}
diff --git a/test/e2e/predicates/predicates_suite_test.go
b/test/e2e/predicates/predicates_suite_test.go
index f3ea9872..9ccf3a83 100644
--- a/test/e2e/predicates/predicates_suite_test.go
+++ b/test/e2e/predicates/predicates_suite_test.go
@@ -94,3 +94,4 @@ var BeZero = gomega.BeZero
var BeEquivalentTo = gomega.BeEquivalentTo
var ContainElement = gomega.ContainElement
var HaveKeyWithValue = gomega.HaveKeyWithValue
+var Succeed = gomega.Succeed
diff --git a/test/e2e/predicates/predicates_test.go
b/test/e2e/predicates/predicates_test.go
index fcda188a..941bcc5e 100644
--- a/test/e2e/predicates/predicates_test.go
+++ b/test/e2e/predicates/predicates_test.go
@@ -82,7 +82,7 @@ var _ = Describe("Predicates", func() {
BeforeEach(func() {
// Initializing kubectl client
kClient = k8s.KubeCtl{}
- Ω(kClient.SetClient()).To(BeNil())
+ Ω(kClient.SetClient()).To(Succeed())
// Initializing rest client
restClient = yunikorn.RClient{}
diff --git a/test/e2e/preemption/preemption_suite_test.go
b/test/e2e/preemption/preemption_suite_test.go
index ff5ff560..0c5eb239 100644
--- a/test/e2e/preemption/preemption_suite_test.go
+++ b/test/e2e/preemption/preemption_suite_test.go
@@ -62,7 +62,7 @@ var _ = ginkgo.BeforeSuite(func() {
suiteName = common.GetSuiteName(filename)
// Initializing kubectl client
kClient = k8s.KubeCtl{}
- Ω(kClient.SetClient()).To(gomega.BeNil())
+ Ω(kClient.SetClient()).To(gomega.Succeed())
// Initializing rest client
restClient = yunikorn.RClient{}
Ω(restClient).NotTo(gomega.BeNil())
@@ -76,7 +76,7 @@ var _ = ginkgo.BeforeSuite(func() {
var nodes *v1.NodeList
nodes, err = kClient.GetNodes()
Ω(err).NotTo(gomega.HaveOccurred())
- Ω(len(nodes.Items)).NotTo(gomega.BeZero(), "Nodes cant be empty")
+ Ω(nodes.Items).NotTo(gomega.BeEmpty(), "Nodes can't be empty")
// Extract node allocatable resources
for _, node := range nodes.Items {
diff --git a/test/e2e/priority_scheduling/priority_scheduling_suite_test.go
b/test/e2e/priority_scheduling/priority_scheduling_suite_test.go
index 538ce0d8..8975bed9 100644
--- a/test/e2e/priority_scheduling/priority_scheduling_suite_test.go
+++ b/test/e2e/priority_scheduling/priority_scheduling_suite_test.go
@@ -92,7 +92,7 @@ var _ = ginkgo.BeforeSuite(func() {
suiteName = common.GetSuiteName(filename)
var err error
kubeClient = k8s.KubeCtl{}
- Expect(kubeClient.SetClient()).To(BeNil())
+ Expect(kubeClient.SetClient()).To(Succeed())
yunikorn.EnsureYuniKornConfigsPresent()
yunikorn.UpdateConfigMapWrapper(oldConfigMap, "")
@@ -113,7 +113,7 @@ var _ = ginkgo.BeforeSuite(func() {
var _ = ginkgo.AfterSuite(func() {
var err error
kubeClient = k8s.KubeCtl{}
- Expect(kubeClient.SetClient()).To(BeNil())
+ Expect(kubeClient.SetClient()).To(Succeed())
By(fmt.Sprintf("Removing priority class %s", normalPriorityClass.Name))
err = kubeClient.DeletePriorityClass(normalPriorityClass.Name)
@@ -137,3 +137,4 @@ var BeNil = gomega.BeNil
var HaveOccurred = gomega.HaveOccurred
var Expect = gomega.Expect
var Equal = gomega.Equal
+var Succeed = gomega.Succeed
diff --git a/test/e2e/queue_quota_mgmt/queue_quota_mgmt_suite_test.go
b/test/e2e/queue_quota_mgmt/queue_quota_mgmt_suite_test.go
index d6a7452d..4d9bf886 100644
--- a/test/e2e/queue_quota_mgmt/queue_quota_mgmt_suite_test.go
+++ b/test/e2e/queue_quota_mgmt/queue_quota_mgmt_suite_test.go
@@ -91,3 +91,4 @@ var BeNil = gomega.BeNil
var HaveOccurred = gomega.HaveOccurred
var BeEmpty = gomega.BeEmpty
var BeEquivalentTo = gomega.BeEquivalentTo
+var Succeed = gomega.Succeed
diff --git a/test/e2e/queue_quota_mgmt/queue_quota_mgmt_test.go
b/test/e2e/queue_quota_mgmt/queue_quota_mgmt_test.go
index d7220e44..bf0adce9 100644
--- a/test/e2e/queue_quota_mgmt/queue_quota_mgmt_test.go
+++ b/test/e2e/queue_quota_mgmt/queue_quota_mgmt_test.go
@@ -60,7 +60,7 @@ var _ = Describe("", func() {
BeforeEach(func() {
// Initializing kubectl client
kClient = k8s.KubeCtl{}
- Ω(kClient.SetClient()).To(BeNil())
+ Ω(kClient.SetClient()).To(Succeed())
// Initializing rest client
restClient = yunikorn.RClient{}
pods = []string{}
diff --git a/test/e2e/recovery_and_restart/recovery_and_restart_suite_test.go
b/test/e2e/recovery_and_restart/recovery_and_restart_suite_test.go
index 142eccf1..436e50b9 100644
--- a/test/e2e/recovery_and_restart/recovery_and_restart_suite_test.go
+++ b/test/e2e/recovery_and_restart/recovery_and_restart_suite_test.go
@@ -57,7 +57,7 @@ var _ = ginkgo.BeforeSuite(func() {
suiteName = common.GetSuiteName(filename)
// Initializing kubectl client
kClient = k8s.KubeCtl{}
- Ω(kClient.SetClient()).To(gomega.BeNil())
+ Ω(kClient.SetClient()).To(gomega.Succeed())
// Initializing rest client
restClient = yunikorn.RClient{}
diff --git a/test/e2e/recovery_and_restart/recovery_and_restart_test.go
b/test/e2e/recovery_and_restart/recovery_and_restart_test.go
index 09d89b17..6906c3f6 100644
--- a/test/e2e/recovery_and_restart/recovery_and_restart_test.go
+++ b/test/e2e/recovery_and_restart/recovery_and_restart_test.go
@@ -109,7 +109,7 @@ var _ = ginkgo.Describe("", func() {
ginkgo.It("Verify_SleepJobs_Restart_YK", func() {
kClient = k8s.KubeCtl{}
- Ω(kClient.SetClient()).To(gomega.BeNil())
+ Ω(kClient.SetClient()).To(gomega.Succeed())
defer yunikorn.RestorePortForwarding(&kClient)
appID1 := normalSleepJobPrefix + "-" + common.RandSeq(5)
@@ -152,7 +152,7 @@ var _ = ginkgo.Describe("", func() {
ginkgo.It("Verify_GangScheduling_TwoGangs_Restart_YK", func() {
kClient = k8s.KubeCtl{}
- Ω(kClient.SetClient()).To(gomega.BeNil())
+ Ω(kClient.SetClient()).To(gomega.Succeed())
defer yunikorn.RestorePortForwarding(&kClient)
appID := gangSleepJobPrefix + "-" + common.RandSeq(5)
@@ -242,13 +242,13 @@ var _ = ginkgo.Describe("", func() {
ginkgo.It("Verify_GangScheduling_PendingPlaceholders_Restart_YK",
func() {
kClient = k8s.KubeCtl{}
- Ω(kClient.SetClient()).To(gomega.BeNil())
+ Ω(kClient.SetClient()).To(gomega.Succeed())
defer yunikorn.RestorePortForwarding(&kClient)
ginkgo.By("Trying to find an available worker node")
nodes, err := kClient.GetNodes()
Ω(err).NotTo(gomega.HaveOccurred())
- Ω(len(nodes.Items) >= 2).Should(gomega.Equal(true), "Not enough
nodes in the cluster, need at least 2")
+ Ω(len(nodes.Items)).Should(gomega.BeNumerically(">=", 2), "Not
enough nodes in the cluster, need at least 2")
var workerResource *resource.Quantity
masterPresent := false
diff --git a/test/e2e/resource_fairness/resource_fairness_suite_test.go
b/test/e2e/resource_fairness/resource_fairness_suite_test.go
index ff389ec4..81c6da09 100644
--- a/test/e2e/resource_fairness/resource_fairness_suite_test.go
+++ b/test/e2e/resource_fairness/resource_fairness_suite_test.go
@@ -44,7 +44,7 @@ var kClient = k8s.KubeCtl{} //nolint
var _ = BeforeSuite(func() {
_, filename, _, _ := runtime.Caller(0)
suiteName = common.GetSuiteName(filename)
- Ω(kClient.SetClient()).To(BeNil())
+ Ω(kClient.SetClient()).To(Succeed())
yunikorn.EnsureYuniKornConfigsPresent()
})
@@ -82,3 +82,4 @@ var Equal = gomega.Equal
var Ω = gomega.Expect
var BeNil = gomega.BeNil
var HaveOccurred = gomega.HaveOccurred
+var Succeed = gomega.Succeed
diff --git a/test/e2e/resource_fairness/resource_fairness_test.go b/test/e2e/resource_fairness/resource_fairness_test.go
index 0e6974a7..6b8c1fed 100644
--- a/test/e2e/resource_fairness/resource_fairness_test.go
+++ b/test/e2e/resource_fairness/resource_fairness_test.go
@@ -51,7 +51,7 @@ var _ = Describe("FairScheduling:", func() {
ns = "test-" + common.RandSeq(10)
queuePath = "root." + ns
kClient = k8s.KubeCtl{}
- Ω(kClient.SetClient()).To(BeNil())
+ Ω(kClient.SetClient()).To(Succeed())
By("Setting custom YuniKorn configuration")
yunikorn.UpdateCustomConfigMapWrapper(oldConfigMap, "fair",
func(sc *configs.SchedulerConfig) error {
@@ -69,7 +69,7 @@ var _ = Describe("FairScheduling:", func() {
return nil
})
kClient = k8s.KubeCtl{}
- Ω(kClient.SetClient()).To(BeNil())
+ Ω(kClient.SetClient()).To(Succeed())
// Restart yunikorn and port-forward
// Required to change node sort policy.
diff --git a/test/e2e/restart_changed_config/restart_changed_config_suite_test.go b/test/e2e/restart_changed_config/restart_changed_config_suite_test.go
index 747ed4c9..1c364fce 100644
--- a/test/e2e/restart_changed_config/restart_changed_config_suite_test.go
+++ b/test/e2e/restart_changed_config/restart_changed_config_suite_test.go
@@ -55,7 +55,7 @@ var _ = ginkgo.BeforeSuite(func() {
suiteName = common.GetSuiteName(filename)
// Initializing kubectl client
kClient = k8s.KubeCtl{}
- Ω(kClient.SetClient()).To(gomega.BeNil())
+ Ω(kClient.SetClient()).To(gomega.Succeed())
// Initializing rest client
restClient = yunikorn.RClient{}
diff --git a/test/e2e/simple_preemptor/simple_preemptor_suite_test.go b/test/e2e/simple_preemptor/simple_preemptor_suite_test.go
index acbb143d..660e0cf1 100644
--- a/test/e2e/simple_preemptor/simple_preemptor_suite_test.go
+++ b/test/e2e/simple_preemptor/simple_preemptor_suite_test.go
@@ -59,7 +59,7 @@ var _ = ginkgo.BeforeSuite(func() {
suiteName = common.GetSuiteName(filename)
// Initializing kubectl client
kClient = k8s.KubeCtl{}
- Ω(kClient.SetClient()).To(gomega.BeNil())
+ Ω(kClient.SetClient()).To(gomega.Succeed())
// Initializing rest client
restClient = yunikorn.RClient{}
Ω(restClient).NotTo(gomega.BeNil())
@@ -74,7 +74,7 @@ var _ = ginkgo.BeforeSuite(func() {
var nodes *v1.NodeList
nodes, err = kClient.GetNodes()
Ω(err).NotTo(gomega.HaveOccurred())
- Ω(len(nodes.Items)).NotTo(gomega.BeZero(), "Events cant be empty")
+ Ω(nodes.Items).NotTo(gomega.BeEmpty(), "Events can't be empty")
// Extract node allocatable resources
for _, node := range nodes.Items {
diff --git a/test/e2e/spark_jobs_scheduling/spark_jobs_scheduling_suite_test.go b/test/e2e/spark_jobs_scheduling/spark_jobs_scheduling_suite_test.go
index 3d35bab6..c46bc44a 100644
--- a/test/e2e/spark_jobs_scheduling/spark_jobs_scheduling_suite_test.go
+++ b/test/e2e/spark_jobs_scheduling/spark_jobs_scheduling_suite_test.go
@@ -84,3 +84,4 @@ var Ω = gomega.Expect
var BeNil = gomega.BeNil
var HaveOccurred = gomega.HaveOccurred
var BeEmpty = gomega.BeEmpty
+var Succeed = gomega.Succeed
diff --git a/test/e2e/spark_jobs_scheduling/spark_jobs_scheduling_test.go b/test/e2e/spark_jobs_scheduling/spark_jobs_scheduling_test.go
index 3c4a661b..19830352 100644
--- a/test/e2e/spark_jobs_scheduling/spark_jobs_scheduling_test.go
+++ b/test/e2e/spark_jobs_scheduling/spark_jobs_scheduling_test.go
@@ -56,7 +56,7 @@ var _ = Describe("", func() {
By(fmt.Sprintf("Spark_py image is: %s", sparkPyImage))
Ω(sparkPyImage).NotTo(BeEmpty())
kClient = k8s.KubeCtl{}
- Ω(kClient.SetClient()).To(BeNil())
+ Ω(kClient.SetClient()).To(Succeed())
Ω(exErr).NotTo(HaveOccurred())
By(fmt.Sprintf("Creating namespace: %s for spark jobs", sparkNS))
ns1, err := kClient.CreateNamespace(sparkNS, nil)
diff --git a/test/e2e/user_group_limit/user_group_limit_suite_test.go b/test/e2e/user_group_limit/user_group_limit_suite_test.go
index 2718009b..e1b0d6db 100644
--- a/test/e2e/user_group_limit/user_group_limit_suite_test.go
+++ b/test/e2e/user_group_limit/user_group_limit_suite_test.go
@@ -55,7 +55,7 @@ var _ = ginkgo.BeforeSuite(func() {
suiteName = common.GetSuiteName(filename)
// Initializing kubectl client
kClient = k8s.KubeCtl{}
- Ω(kClient.SetClient()).To(gomega.BeNil())
+ Ω(kClient.SetClient()).To(gomega.Succeed())
// Initializing rest client
restClient = yunikorn.RClient{}
Ω(restClient).NotTo(gomega.BeNil())
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]