This is an automated email from the ASF dual-hosted git repository.
ccondit pushed a commit to branch branch-1.6
in repository https://gitbox.apache.org/repos/asf/yunikorn-k8shim.git
The following commit(s) were added to refs/heads/branch-1.6 by this push:
new 560ef3d8 [YUNIKORN-3024] Update to go 1.23 (#950)
560ef3d8 is described below
commit 560ef3d839781a13c3cc9d1293c65a22a3bc87d7
Author: kaichiachen <[email protected]>
AuthorDate: Thu Feb 20 11:55:29 2025 -0600
[YUNIKORN-3024] Update to go 1.23 (#950)
Closes: #950
Signed-off-by: Craig Condit <[email protected]>
(cherry picked from commit 2964df3462d3fa9d7404b20498705a9874b1360b)
---
.go_version | 2 +-
.golangci.yml | 1 -
Makefile | 2 +-
pkg/admission/admission_controller.go | 3 +-
pkg/admission/admission_controller_test.go | 16 +++++--
pkg/cache/application_state.go | 9 ++--
pkg/cache/placeholder_manager.go | 7 +--
pkg/cache/scheduler_callback.go | 2 +-
pkg/cache/task_state.go | 6 ++-
pkg/client/apifactory_mock.go | 12 +++--
pkg/client/apifactory_test.go | 56 ++++++++++++++++++++++
pkg/conf/schedulerconf.go | 2 +-
pkg/dispatcher/dispatch_test.go | 4 +-
pkg/dispatcher/dispatcher.go | 7 ++-
pkg/log/logger.go | 12 +++--
pkg/log/logger_test.go | 3 ++
pkg/plugin/predicates/predicate_manager_test.go | 2 +-
pkg/shim/scheduler_mock_test.go | 2 +-
pkg/shim/scheduler_perf_test.go | 6 ++-
test/e2e/framework/helpers/common/utils.go | 2 +-
test/e2e/framework/helpers/k8s/gang_job.go | 14 ++++--
test/e2e/framework/helpers/k8s/k8s_utils.go | 18 +++++--
.../framework/helpers/yunikorn/rest_api_utils.go | 4 +-
.../recovery_and_restart_test.go | 3 +-
24 files changed, 148 insertions(+), 47 deletions(-)
diff --git a/.go_version b/.go_version
index 71f7f51d..a1b6e17d 100644
--- a/.go_version
+++ b/.go_version
@@ -1 +1 @@
-1.22
+1.23
diff --git a/.golangci.yml b/.golangci.yml
index 9819f608..71136078 100644
--- a/.golangci.yml
+++ b/.golangci.yml
@@ -18,7 +18,6 @@
# options for analysis running
run:
issues-exit-code: 1
- skip-dirs-use-default: true
modules-download-mode: readonly
timeout: 10m
diff --git a/Makefile b/Makefile
index 07e70510..593b14d1 100644
--- a/Makefile
+++ b/Makefile
@@ -162,7 +162,7 @@ endif
export PATH := $(BASE_DIR)/$(SHELLCHECK_PATH):$(PATH)
# golangci-lint
-GOLANGCI_LINT_VERSION=1.57.2
+GOLANGCI_LINT_VERSION=1.63.4
GOLANGCI_LINT_PATH=$(TOOLS_DIR)/golangci-lint-v$(GOLANGCI_LINT_VERSION)
GOLANGCI_LINT_BIN=$(GOLANGCI_LINT_PATH)/golangci-lint
GOLANGCI_LINT_ARCHIVE=golangci-lint-$(GOLANGCI_LINT_VERSION)-$(OS)-$(EXEC_ARCH).tar.gz
diff --git a/pkg/admission/admission_controller.go b/pkg/admission/admission_controller.go
index a40e39a9..c0c34326 100644
--- a/pkg/admission/admission_controller.go
+++ b/pkg/admission/admission_controller.go
@@ -22,6 +22,7 @@ import (
"bytes"
"crypto/sha256"
"encoding/json"
+ "errors"
"fmt"
"io"
"net/http"
@@ -607,7 +608,7 @@ func (c *AdmissionController) validateConfigMap(namespace string, cm *v1.ConfigM
return nil
}
if !responseData.Allowed {
- err = fmt.Errorf(responseData.Reason)
+ err = errors.New(responseData.Reason)
log.Log(log.Admission).Error("Configmap validation failed, aborting", zap.Error(err))
return err
}
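The fmt.Errorf(responseData.Reason) to errors.New(responseData.Reason) change avoids treating a runtime string as a format string. A minimal standalone sketch of the difference (the example string is invented, not taken from YuniKorn):

    package main

    import (
        "errors"
        "fmt"
    )

    func main() {
        reason := "denied: queue usage at 110%" // arbitrary reason containing a literal '%'

        // fmt.Errorf interprets its first argument as a format string, so the
        // trailing '%' is reported as a bad verb: "... 110%!(NOVERB)".
        bad := fmt.Errorf(reason)
        // errors.New keeps the text verbatim, which is what the change switches to.
        good := errors.New(reason)

        fmt.Println(bad)
        fmt.Println(good)
    }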
diff --git a/pkg/admission/admission_controller_test.go b/pkg/admission/admission_controller_test.go
index 7cefee4b..ec064c7f 100644
--- a/pkg/admission/admission_controller_test.go
+++ b/pkg/admission/admission_controller_test.go
@@ -568,7 +568,9 @@ func TestMutate(t *testing.T) {
assert.Check(t, resp.Allowed, "response not allowed for unknown object type")
assert.Check(t, len(resp.Patch) > 0, "empty patch for deployment")
annotations := annotationsFromDeployment(t, resp.Patch)
- assert.Equal(t, annotations[common.UserInfoAnnotation].(string), "{\"user\":\"testExtUser\"}")
+ annotationValue, ok := annotations[common.UserInfoAnnotation].(string)
+ assert.Assert(t, ok, "UserInfoAnnotation value is not a string")
+ assert.Equal(t, annotationValue, "{\"user\":\"testExtUser\"}")
// deployment - annotation is not set, bypassAuth is enabled
ac = prepareController(t, "", "", "^kube-system$,^bypass$", "", "^nolabel$", true, true)
@@ -838,7 +840,9 @@ func schedulerName(t *testing.T, patch []byte) string {
ops := parsePatch(t, patch)
for _, op := range ops {
if op.Path == "/spec/schedulerName" {
- return op.Value.(string)
+ val, ok := op.Value.(string)
+ assert.Assert(t, ok, "scheduler name value is not a string")
+ return val
}
}
return ""
@@ -848,7 +852,9 @@ func labels(t *testing.T, patch []byte) map[string]interface{} {
ops := parsePatch(t, patch)
for _, op := range ops {
if op.Path == "/metadata/labels" {
- return op.Value.(map[string]interface{})
+ val, ok := op.Value.(map[string]interface{})
+ assert.Assert(t, ok, "labels value is not a map")
+ return val
}
}
return make(map[string]interface{})
@@ -858,7 +864,9 @@ func annotationsFromDeployment(t *testing.T, patch []byte) map[string]interface{
ops := parsePatch(t, patch)
for _, op := range ops {
if op.Path == "/spec/template/metadata/annotations" {
- return op.Value.(map[string]interface{})
+ val, ok := op.Value.(map[string]interface{})
+ assert.Assert(t, ok, "annotations value is not a map")
+ return val
}
}
return make(map[string]interface{})
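The test changes above replace bare type assertions with the comma-ok form so a type mismatch fails the assertion instead of panicking the test binary. A small illustrative sketch of the pattern (the values are invented):

    package main

    import "fmt"

    func main() {
        var value interface{} = map[string]interface{}{"user": "demo"}

        // value.(string) would panic here because value actually holds a map.
        // The comma-ok form reports the mismatch and lets the caller decide.
        s, ok := value.(string)
        if !ok {
            fmt.Printf("not a string, got %T\n", value)
            return
        }
        fmt.Println(s)
    }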
diff --git a/pkg/cache/application_state.go b/pkg/cache/application_state.go
index 59541b24..d22fde61 100644
--- a/pkg/cache/application_state.go
+++ b/pkg/cache/application_state.go
@@ -470,7 +470,8 @@ func newAppState() *fsm.FSM { //nolint:funlen
RejectApplication.String(): func(_ context.Context, event *fsm.Event) {
app := event.Args[0].(*Application) //nolint:errcheck
eventArgs := make([]string, 1)
- if err := events.GetEventArgsAsStrings(eventArgs, event.Args[1].([]interface{})); err != nil {
+ generic := event.Args[1].([]interface{}) //nolint:errcheck
+ if err := events.GetEventArgsAsStrings(eventArgs, generic); err != nil {
log.Log(log.ShimFSM).Error("fail to parse event arg", zap.Error(err))
return
}
@@ -484,7 +485,8 @@ func newAppState() *fsm.FSM { //nolint:funlen
FailApplication.String(): func(_ context.Context, event *fsm.Event) {
app := event.Args[0].(*Application) //nolint:errcheck
eventArgs := make([]string, 1)
- if err := events.GetEventArgsAsStrings(eventArgs, event.Args[1].([]interface{})); err != nil {
+ generic := event.Args[1].([]interface{}) //nolint:errcheck
+ if err := events.GetEventArgsAsStrings(eventArgs, generic); err != nil {
log.Log(log.ShimFSM).Error("fail to parse event arg", zap.Error(err))
return
}
@@ -498,7 +500,8 @@ func newAppState() *fsm.FSM { //nolint:funlen
ReleaseAppAllocation.String(): func(_ context.Context, event *fsm.Event) {
app := event.Args[0].(*Application) //nolint:errcheck
eventArgs := make([]string, 2)
- if err := events.GetEventArgsAsStrings(eventArgs, event.Args[1].([]interface{})); err != nil {
+ generic := event.Args[1].([]interface{}) //nolint:errcheck
+ if err := events.GetEventArgsAsStrings(eventArgs, generic); err != nil {
log.Log(log.ShimFSM).Error("fail to parse event arg", zap.Error(err))
return
}
diff --git a/pkg/cache/placeholder_manager.go b/pkg/cache/placeholder_manager.go
index 7a44ea06..17fc45e2 100644
--- a/pkg/cache/placeholder_manager.go
+++ b/pkg/cache/placeholder_manager.go
@@ -40,7 +40,7 @@ type PlaceholderManager struct {
// and keep retrying deleting them in order to avoid wasting resources.
orphanPods map[string]*v1.Pod
stopChan chan struct{}
- running atomic.Value
+ running atomic.Bool
cleanupTime time.Duration
// a simple mutex will do we do not have separate read and write paths
locking.RWMutex
@@ -54,11 +54,8 @@ var (
func NewPlaceholderManager(clients *client.Clients) *PlaceholderManager {
mu.Lock()
defer mu.Unlock()
- var r atomic.Value
- r.Store(false)
placeholderMgr = &PlaceholderManager{
clients: clients,
- running: r,
orphanPods: make(map[string]*v1.Pod),
stopChan: make(chan struct{}),
cleanupTime: 5 * time.Second,
@@ -173,7 +170,7 @@ func (mgr *PlaceholderManager) Stop() {
}
func (mgr *PlaceholderManager) isRunning() bool {
- return mgr.running.Load().(bool)
+ return mgr.running.Load()
}
func (mgr *PlaceholderManager) setRunning(flag bool) {
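Switching the running flag from atomic.Value to atomic.Bool removes the Store(false) initialization and the .(bool) assertion on every read, since the typed atomic's zero value already reads as false. A minimal sketch of the typed API (standalone, not project code):

    package main

    import (
        "fmt"
        "sync/atomic"
    )

    func main() {
        var running atomic.Bool // zero value is usable and reads as false

        fmt.Println(running.Load()) // false, no type assertion needed
        running.Store(true)
        fmt.Println(running.Load()) // true
    }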
diff --git a/pkg/cache/scheduler_callback.go b/pkg/cache/scheduler_callback.go
index 643b4c71..1a21f5a2 100644
--- a/pkg/cache/scheduler_callback.go
+++ b/pkg/cache/scheduler_callback.go
@@ -191,7 +191,7 @@ func (callback *AsyncRMCallback) PreemptionPredicates(args *si.PreemptionPredica
}
return &si.PreemptionPredicatesResponse{
Success: ok,
- Index: int32(index),
+ Index: int32(index), //nolint:gosec
}
}
diff --git a/pkg/cache/task_state.go b/pkg/cache/task_state.go
index d244fa8d..112fdd5a 100644
--- a/pkg/cache/task_state.go
+++ b/pkg/cache/task_state.go
@@ -402,7 +402,8 @@ func callbacks(states *TStates) fsm.Callbacks {
task := event.Args[0].(*Task) //nolint:errcheck
eventArgs := make([]string, 1)
reason := ""
- if err := events.GetEventArgsAsStrings(eventArgs, event.Args[1].([]interface{})); err != nil {
+ generic := event.Args[1].([]interface{}) //nolint:errcheck
+ if err := events.GetEventArgsAsStrings(eventArgs, generic); err != nil {
log.Log(log.ShimFSM).Error("failed to parse event arg", zap.Error(err))
reason = err.Error()
} else {
@@ -421,7 +422,8 @@ func callbacks(states *TStates) fsm.Callbacks {
beforeHook(TaskAllocated): func(_ context.Context, event *fsm.Event) {
task := event.Args[0].(*Task) //nolint:errcheck
eventArgs := make([]string, 2)
- if err := events.GetEventArgsAsStrings(eventArgs, event.Args[1].([]interface{})); err != nil {
+ generic := event.Args[1].([]interface{}) //nolint:errcheck
+ if err := events.GetEventArgsAsStrings(eventArgs, generic); err != nil {
log.Log(log.ShimFSM).Error("failed to parse event arg", zap.Error(err))
return
}
diff --git a/pkg/client/apifactory_mock.go b/pkg/client/apifactory_mock.go
index 0b2954d1..1194b6da 100644
--- a/pkg/client/apifactory_mock.go
+++ b/pkg/client/apifactory_mock.go
@@ -397,12 +397,18 @@ func (m *MockedAPIProvider) UpdatePriorityClass(oldObj *schedv1.PriorityClass, n
}
}
-func (m *MockedAPIProvider) GetPodBindStats() BindStats {
- return m.clients.KubeClient.(*KubeClientMock).GetBindStats()
+func (m *MockedAPIProvider) GetPodBindStats() *BindStats {
+ kubeClient, ok := m.clients.KubeClient.(*KubeClientMock)
+ if !ok {
+ log.Log(log.Test).Error("failed to get KubeClientMock")
+ return nil
+ }
+ bindStats := kubeClient.GetBindStats()
+ return &bindStats
}
func (m *MockedAPIProvider) GetBoundPods(clear bool) []BoundPod {
- return m.clients.KubeClient.(*KubeClientMock).GetBoundPods(clear)
+ return m.clients.KubeClient.(*KubeClientMock).GetBoundPods(clear) // nolint: errcheck
}
// MockedPersistentVolumeInformer implements PersistentVolumeInformer interface
diff --git a/pkg/client/apifactory_test.go b/pkg/client/apifactory_test.go
index cf9271b0..be66c91e 100644
--- a/pkg/client/apifactory_test.go
+++ b/pkg/client/apifactory_test.go
@@ -43,3 +43,59 @@ func TestInformerTypes(t *testing.T) {
assert.Equal(t, "ReplicaSet", ReplicaSetInformerHandlers.String())
assert.Equal(t, "StatefulSet", StatefulSetInformerHandlers.String())
}
+
+func TestMockedAPIProvider_GetPodBindStats(t *testing.T) {
+ mock := &MockedAPIProvider{
+ clients: &Clients{
+ KubeClient: &KubeClientMock{
+ bindStats: BindStats{
+ Success: 10,
+ Errors: 2,
+ },
+ },
+ },
+ }
+
+ got := mock.GetPodBindStats()
+ assert.DeepEqual(t, &BindStats{
+ Success: 10,
+ Errors: 2,
+ }, got)
+
+ nilMock := &MockedAPIProvider{
+ clients: &Clients{
+ KubeClient: nil,
+ },
+ }
+
+ got = nilMock.GetPodBindStats()
+ assert.DeepEqual(t, (*BindStats)(nil), got)
+}
+
+func TestMockedAPIProvider_GetBoundPods(t *testing.T) {
+ mock := &MockedAPIProvider{
+ clients: &Clients{
+ KubeClient: &KubeClientMock{
+ boundPods: []BoundPod{
+ {Pod: "pod1", Host: "h1"},
+ {Pod: "pod2", Host: "h2"},
+ },
+ },
+ },
+ }
+ got := mock.GetBoundPods(true)
+ assert.DeepEqual(t, []BoundPod{
+ {Pod: "pod1", Host: "h1"},
+ {Pod: "pod2", Host: "h2"},
+ }, got)
+
+ emptyMock := &MockedAPIProvider{
+ clients: &Clients{
+ KubeClient: &KubeClientMock{
+ boundPods: []BoundPod{},
+ },
+ },
+ }
+ got = emptyMock.GetBoundPods(true)
+ assert.DeepEqual(t, []BoundPod{}, got)
+}
diff --git a/pkg/conf/schedulerconf.go b/pkg/conf/schedulerconf.go
index 2e941213..95bfad76 100644
--- a/pkg/conf/schedulerconf.go
+++ b/pkg/conf/schedulerconf.go
@@ -248,7 +248,7 @@ func checkNonReloadableBool(name string, old *bool, new *bool) {
func GetSchedulerConf() *SchedulerConf {
once.Do(createConfigs)
- return confHolder.Load().(*SchedulerConf)
+ return confHolder.Load().(*SchedulerConf) //nolint:errcheck
}
func SetSchedulerConf(conf *SchedulerConf) {
diff --git a/pkg/dispatcher/dispatch_test.go b/pkg/dispatcher/dispatch_test.go
index 8a2cc381..50aec6ae 100644
--- a/pkg/dispatcher/dispatch_test.go
+++ b/pkg/dispatcher/dispatch_test.go
@@ -283,7 +283,9 @@ func TestExceedAsyncDispatchLimit(t *testing.T) {
Stop()
// check error
if err := recover(); err != nil {
- assert.Assert(t, strings.Contains(err.(error).Error(), "dispatcher exceeds async-dispatch limit"))
+ errStr, ok := err.(error)
+ assert.Assert(t, ok, "Expected error type from panic, got %T", err)
+ assert.Assert(t, strings.Contains(errStr.Error(), "dispatcher exceeds async-dispatch limit"))
} else {
t.Error("Panic should be caught here")
}
diff --git a/pkg/dispatcher/dispatcher.go b/pkg/dispatcher/dispatcher.go
index b0960f19..6b72346d 100644
--- a/pkg/dispatcher/dispatcher.go
+++ b/pkg/dispatcher/dispatcher.go
@@ -55,7 +55,7 @@ type Dispatcher struct {
eventChan chan events.SchedulingEvent
stopChan chan struct{}
handlers map[EventType]map[string]func(interface{})
- running atomic.Value
+ running atomic.Bool
lock locking.RWMutex
stopped sync.WaitGroup
}
@@ -66,12 +66,11 @@ func initDispatcher() {
eventChan: make(chan events.SchedulingEvent, eventChannelCapacity),
handlers: make(map[EventType]map[string]func(interface{})),
stopChan: make(chan struct{}),
- running: atomic.Value{},
lock: locking.RWMutex{},
}
dispatcher.setRunning(false)
DispatchTimeout = conf.GetSchedulerConf().DispatchTimeout
- AsyncDispatchLimit = max(10000, int32(eventChannelCapacity/10))
+ AsyncDispatchLimit = max(10000, int32(eventChannelCapacity/10)) //nolint:gosec
log.Log(log.ShimDispatcher).Info("Init dispatcher",
zap.Int("EventChannelCapacity", eventChannelCapacity),
@@ -146,7 +145,7 @@ func Dispatch(event events.SchedulingEvent) {
}
func (p *Dispatcher) isRunning() bool {
- return p.running.Load().(bool)
+ return p.running.Load()
}
func (p *Dispatcher) setRunning(flag bool) {
diff --git a/pkg/log/logger.go b/pkg/log/logger.go
index 96609847..f3ee845c 100644
--- a/pkg/log/logger.go
+++ b/pkg/log/logger.go
@@ -292,14 +292,16 @@ func parseLevel(level string) *zapcore.Level {
}
// parse numeric
- levelNum, err := strconv.ParseInt(level, 10, 31)
+ levelNum, err := strconv.ParseInt(level, 10, 8)
if err == nil {
- zapLevel = zapcore.Level(levelNum)
- if zapLevel < zapcore.DebugLevel {
+ // Validate levelNum is within valid zapcore.Level range before conversion
+ switch {
+ case levelNum < int64(zapcore.DebugLevel):
zapLevel = zapcore.DebugLevel
- }
- if zapLevel >= zapcore.InvalidLevel {
+ case levelNum >= int64(zapcore.InvalidLevel):
zapLevel = zapcore.InvalidLevel - 1
+ default:
+ zapLevel = zapcore.Level(levelNum)
}
return &zapLevel
}
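Parsing with strconv.ParseInt(level, 10, 8) makes anything outside the int8 range an error up front, so only in-range values reach the clamping switch; the new logger_test.go cases for "-129" and "128" rely on exactly this. A quick standalone check of that ParseInt behavior:

    package main

    import (
        "fmt"
        "strconv"
    )

    func main() {
        for _, s := range []string{"-1", "5", "127", "128", "-129"} {
            n, err := strconv.ParseInt(s, 10, 8) // bitSize 8: reject values outside -128..127
            if err != nil {
                fmt.Printf("%s -> error: %v\n", s, err)
                continue
            }
            fmt.Printf("%s -> %d\n", s, n)
        }
    }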
diff --git a/pkg/log/logger_test.go b/pkg/log/logger_test.go
index 984d2221..01a50828 100644
--- a/pkg/log/logger_test.go
+++ b/pkg/log/logger_test.go
@@ -164,6 +164,9 @@ func TestParseLevel(t *testing.T) {
assert.Equal(t, zapcore.PanicLevel, *parseLevel("PAnIC"))
assert.Equal(t, zapcore.FatalLevel, *parseLevel("faTal"))
assert.Assert(t, parseLevel("x") == nil, "parse error")
+
+ assert.Assert(t, parseLevel("-129") == nil, "Values outside int8 range (-128 to 127)")
+ assert.Assert(t, parseLevel("128") == nil, "Values outside int8 range (-128 to 127)")
}
func TestParentLogger(t *testing.T) {
diff --git a/pkg/plugin/predicates/predicate_manager_test.go b/pkg/plugin/predicates/predicate_manager_test.go
index fe17db7a..d84d0acc 100644
--- a/pkg/plugin/predicates/predicate_manager_test.go
+++ b/pkg/plugin/predicates/predicate_manager_test.go
@@ -1099,7 +1099,7 @@ func makeResources(milliCPU, memory, pods, extendedA, storage, hugePageA int64)
func newPodWithPort(hostPorts ...int) *v1.Pod {
var networkPorts []v1.ContainerPort
for _, port := range hostPorts {
- networkPorts = append(networkPorts, v1.ContainerPort{HostPort: int32(port)})
+ networkPorts = append(networkPorts, v1.ContainerPort{HostPort: int32(port)}) // nolint: gosec
}
return &v1.Pod{
Spec: v1.PodSpec{
diff --git a/pkg/shim/scheduler_mock_test.go b/pkg/shim/scheduler_mock_test.go
index b67746d7..82f80c48 100644
--- a/pkg/shim/scheduler_mock_test.go
+++ b/pkg/shim/scheduler_mock_test.go
@@ -323,7 +323,7 @@ func (fc *MockScheduler) getApplicationFromCore(appID, partition string) *object
return fc.coreContext.Scheduler.GetClusterContext().GetApplication(appID, partition)
}
-func (fc *MockScheduler) GetPodBindStats() client.BindStats {
+func (fc *MockScheduler) GetPodBindStats() *client.BindStats {
return fc.apiProvider.GetPodBindStats()
}
diff --git a/pkg/shim/scheduler_perf_test.go b/pkg/shim/scheduler_perf_test.go
index d3858399..509add8f 100644
--- a/pkg/shim/scheduler_perf_test.go
+++ b/pkg/shim/scheduler_perf_test.go
@@ -136,8 +136,10 @@ func BenchmarkSchedulingThroughPut(b *testing.B) {
assert.NilError(b, err, "scheduling did not finish in time")
stat := cluster.GetPodBindStats()
- diff := stat.Last.Sub(stat.First)
- fmt.Printf("Overall throughput: %.0f allocations/s\n", float64(totalPods)/diff.Seconds())
+ if stat != nil {
+ diff := stat.Last.Sub(stat.First)
+ fmt.Printf("Overall throughput: %.0f allocations/s\n", float64(totalPods)/diff.Seconds())
+ }
fmt.Println("Container allocation throughput based on metrics")
for _, d := range collector.getData() {
if d != 0 {
diff --git a/test/e2e/framework/helpers/common/utils.go b/test/e2e/framework/helpers/common/utils.go
index fc26c77c..d7a11de1 100644
--- a/test/e2e/framework/helpers/common/utils.go
+++ b/test/e2e/framework/helpers/common/utils.go
@@ -236,7 +236,7 @@ func RunShellCmdForeground(cmdStr string) (string, error) {
}
if errStr := errStream.String(); len(errStr) > 0 {
- return stdOutStream.String(), fmt.Errorf(errStr)
+ return stdOutStream.String(), errors.New(errStr)
}
return stdOutStream.String(), nil
diff --git a/test/e2e/framework/helpers/k8s/gang_job.go b/test/e2e/framework/helpers/k8s/gang_job.go
index 87429c3b..41348f2e 100644
--- a/test/e2e/framework/helpers/k8s/gang_job.go
+++ b/test/e2e/framework/helpers/k8s/gang_job.go
@@ -20,6 +20,8 @@ package k8s
import (
"encoding/json"
+ "fmt"
+ "math"
"strconv"
batchv1 "k8s.io/api/batch/v1"
@@ -101,7 +103,10 @@ func DecoratePodForGangScheduling(
return pod
}
-func InitTaskGroups(conf SleepPodConfig, mainTaskGroupName, secondTaskGroupName string, parallelism int) []*cache.TaskGroup {
+func InitTaskGroups(conf SleepPodConfig, mainTaskGroupName, secondTaskGroupName string, parallelism int) ([]*cache.TaskGroup, error) {
+ if parallelism < math.MinInt32 || parallelism > math.MaxInt32 {
+ return nil, fmt.Errorf("parallelism value %d is out of int32 range", parallelism)
+ }
tg1 := &cache.TaskGroup{
MinMember: int32(parallelism),
Name: mainTaskGroupName,
@@ -113,8 +118,11 @@ func InitTaskGroups(conf SleepPodConfig, mainTaskGroupName, secondTaskGroupName
// create TG2 more with more members than needed, also make sure that
// placeholders will stay in Pending state
+ if parallelism < math.MinInt32 || parallelism > math.MaxInt32-1 {
+ return nil, fmt.Errorf("parallelism+1 value %d is out of int32 range", parallelism+1)
+ }
tg2 := &cache.TaskGroup{
- MinMember: int32(parallelism + 1),
+ MinMember: int32(parallelism + 1), // nolint: gosec
Name: secondTaskGroupName,
MinResource: map[string]resource.Quantity{
"cpu": resource.MustParse(strconv.FormatInt(conf.CPU, 10) + "m"),
@@ -129,7 +137,7 @@ func InitTaskGroups(conf SleepPodConfig, mainTaskGroupName, secondTaskGroupName
tGroups[0] = tg1
tGroups[1] = tg2
- return tGroups
+ return tGroups, nil
}
func InitTaskGroup(conf SleepPodConfig, taskGroupName string, parallelism int32) []*cache.TaskGroup {
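InitTaskGroups now rejects out-of-range parallelism values before converting to int32 instead of silently truncating, which is the pattern gosec's integer-conversion overflow check pushes toward. A minimal sketch of the same idea with a hypothetical toInt32 helper (the helper name is illustrative, not from this commit):

    package main

    import (
        "fmt"
        "math"
    )

    // toInt32 is a hypothetical helper mirroring the bounds check added above.
    func toInt32(n int64) (int32, error) {
        if n < math.MinInt32 || n > math.MaxInt32 {
            return 0, fmt.Errorf("value %d is out of int32 range", n)
        }
        return int32(n), nil
    }

    func main() {
        if v, err := toInt32(3); err == nil {
            fmt.Println("MinMember:", v)
        }
        if _, err := toInt32(int64(math.MaxInt32) + 1); err != nil {
            fmt.Println("rejected:", err)
        }
    }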
diff --git a/test/e2e/framework/helpers/k8s/k8s_utils.go b/test/e2e/framework/helpers/k8s/k8s_utils.go
index 77a472f5..e2d5f261 100644
--- a/test/e2e/framework/helpers/k8s/k8s_utils.go
+++ b/test/e2e/framework/helpers/k8s/k8s_utils.go
@@ -631,7 +631,11 @@ func GetConfigMapObj(yamlPath string) (*v1.ConfigMap, error) {
if err != nil {
return nil, err
}
- return c.(*v1.ConfigMap), err
+ configMap, ok := c.(*v1.ConfigMap)
+ if !ok {
+ return nil, fmt.Errorf("failed to convert object to ConfigMap")
+ }
+ return configMap, nil
}
func (k *KubeCtl) LogNamespaceInfo(file *os.File, ns string) error {
@@ -742,7 +746,11 @@ func GetPodObj(yamlPath string) (*v1.Pod, error) {
if err != nil {
return nil, err
}
- return o.(*v1.Pod), err
+ pod, ok := o.(*v1.Pod)
+ if !ok {
+ return nil, fmt.Errorf("failed to convert object to Pod")
+ }
+ return pod, nil
}
func (k *KubeCtl) CreateDeployment(deployment *appsv1.Deployment, namespace string) (*appsv1.Deployment, error) {
@@ -1096,7 +1104,11 @@ func GetSecretObj(yamlPath string) (*v1.Secret, error) {
if err != nil {
return nil, err
}
- return o.(*v1.Secret), err
+ secret, ok := o.(*v1.Secret)
+ if !ok {
+ return nil, fmt.Errorf("failed to convert object to Secret")
+ }
+ return secret, err
}
func (k *KubeCtl) CreateServiceAccount(accountName string, namespace string) (*v1.ServiceAccount, error) {
diff --git a/test/e2e/framework/helpers/yunikorn/rest_api_utils.go b/test/e2e/framework/helpers/yunikorn/rest_api_utils.go
index 91cede91..102bc335 100644
--- a/test/e2e/framework/helpers/yunikorn/rest_api_utils.go
+++ b/test/e2e/framework/helpers/yunikorn/rest_api_utils.go
@@ -317,7 +317,7 @@ func (c *RClient) isAppInDesiredState(partition string, queue string, appID stri
case state:
return true, nil
case States().Application.Rejected:
- return false, fmt.Errorf(fmt.Sprintf("App not in desired state: %s", state))
+ return false, fmt.Errorf("app not in desired state: %s", state)
}
return false, nil
}
@@ -383,7 +383,7 @@ func (c *RClient) AreAllExecPodsAllotted(partition string, queueName string, app
return false, err
}
if appInfo.Allocations == nil {
- return false, fmt.Errorf(fmt.Sprintf("Allocations are not yet complete for appID: %s", appID))
+ return false, fmt.Errorf("allocations are not yet complete for appID: %s", appID)
}
if len(appInfo.Allocations) >= execPodCount {
return true, nil
diff --git a/test/e2e/recovery_and_restart/recovery_and_restart_test.go b/test/e2e/recovery_and_restart/recovery_and_restart_test.go
index d78326f9..37bd69c1 100644
--- a/test/e2e/recovery_and_restart/recovery_and_restart_test.go
+++ b/test/e2e/recovery_and_restart/recovery_and_restart_test.go
@@ -200,7 +200,8 @@ var _ = ginkgo.Describe("", func() {
appID := gangSleepJobPrefix + "-" + common.RandSeq(5)
sleepPodConfig := k8s.SleepPodConfig{Name: "gang-sleep-job", NS: dev, Time: 1, AppID: appID}
- taskGroups := k8s.InitTaskGroups(sleepPodConfig, taskGroupA, taskGroupB, parallelism)
+ taskGroups, taskGroupErr := k8s.InitTaskGroups(sleepPodConfig, taskGroupA, taskGroupB, parallelism)
+ Ω(taskGroupErr).NotTo(gomega.HaveOccurred())
pod, podErr := k8s.InitSleepPod(sleepPodConfig)
Ω(podErr).NotTo(gomega.HaveOccurred())
pod = k8s.DecoratePodForGangScheduling(30, "Soft", taskGroupA,