This is an automated email from the ASF dual-hosted git repository.
chia7712 pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/yunikorn-core.git
The following commit(s) were added to refs/heads/master by this push:
new 21303191 [YUNIKORN-2729] cleanup remaining lint warnings in order to remove --new-from-rev from Makefile (#915)
21303191 is described below
commit 21303191a9c9ee791de3371ddca973df51b7a755
Author: 0lai0 <[email protected]>
AuthorDate: Thu Jul 18 18:16:33 2024 +0800
[YUNIKORN-2729] cleanup remaining lint warnings in order to remove --new-from-rev from Makefile (#915)
Closes: #915
Signed-off-by: Chia-Ping Tsai <[email protected]>
---
Makefile | 2 +-
pkg/common/configs/configvalidator.go | 3 +-
pkg/common/configs/configvalidator_test.go | 4 +-
pkg/common/security/usergroup.go | 1 -
pkg/common/security/usergroup_test.go | 23 ++++++---
pkg/common/security/usergroup_test_resolver.go | 18 ++++---
pkg/events/event_system.go | 1 -
pkg/scheduler/context.go | 1 -
pkg/scheduler/objects/allocation_ask.go | 1 -
pkg/scheduler/objects/allocation_ask_test.go | 6 ++-
pkg/scheduler/objects/allocation_test.go | 4 +-
pkg/scheduler/objects/application.go | 1 -
pkg/scheduler/objects/node_test.go | 2 -
pkg/scheduler/partition.go | 1 -
pkg/scheduler/partition_manager.go | 1 -
pkg/scheduler/placement/filter_test.go | 47 ++++++++----------
pkg/scheduler/placement/fixed_rule_test.go | 12 +----
pkg/scheduler/placement/placement_test.go | 10 ++--
pkg/scheduler/placement/provided_rule_test.go | 5 +-
pkg/scheduler/placement/recovery_rule_test.go | 13 +----
pkg/scheduler/placement/rule.go | 4 +-
pkg/scheduler/placement/rule_test.go | 4 +-
pkg/scheduler/placement/tag_rule_test.go | 12 +----
pkg/scheduler/placement/testrule_test.go | 11 ++++
pkg/scheduler/tests/reservation_test.go | 8 +--
pkg/scheduler/tests/smoke_test.go | 11 ++--
pkg/scheduler/ugm/group_tracker.go | 7 ---
pkg/webservice/handler_mock_test.go | 30 +++++------
pkg/webservice/handlers_test.go | 69 +++++++++++---------------
pkg/webservice/webservice.go | 1 -
30 files changed, 137 insertions(+), 176 deletions(-)
diff --git a/Makefile b/Makefile
index 88b12cb7..34af9933 100644
--- a/Makefile
+++ b/Makefile
@@ -131,7 +131,7 @@ lint: $(GOLANGCI_LINT_BIN)
@git symbolic-ref -q HEAD && REV="origin/HEAD" || REV="HEAD^" ; \
headSHA=$$(git rev-parse --short=12 $${REV}) ; \
echo "checking against commit sha $${headSHA}" ; \
- "${GOLANGCI_LINT_BIN}" run --new-from-rev=$${headSHA}
+ "${GOLANGCI_LINT_BIN}" run
# Check scripts
.PHONY: check_scripts
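Note: golangci-lint's --new-from-rev flag limits the report to issues introduced after the given git revision. With the pre-existing warnings below cleaned up, that escape hatch is no longer needed and every run now lints the whole tree.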
diff --git a/pkg/common/configs/configvalidator.go b/pkg/common/configs/configvalidator.go
index 87003260..04acec17 100644
--- a/pkg/common/configs/configvalidator.go
+++ b/pkg/common/configs/configvalidator.go
@@ -664,7 +664,8 @@ func checkQueues(queue *QueueConfig, level int) error {
}
// recurse into the depth if this level passed
- for _, child := range queue.Queues {
+ for _, q := range queue.Queues {
+ child := q
err = checkQueues(&child, level+1)
if err != nil {
return err
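Note: the copy before the recursive call above (and the same pattern in the test file that follows) reads like a fix for gosec's G601 "implicit memory aliasing in for loop" warning: before Go 1.22 the range variable is a single variable reused on every iteration, so taking its address hands out one shared pointer. A minimal sketch of the pattern, with illustrative names that are not from the repo:

    package main

    import "fmt"

    type queue struct {
    	name     string
    	children []queue
    }

    // walk recurses into children. Passing &c directly would alias the
    // range variable, so copy it into a fresh variable first.
    func walk(q *queue, level int) {
    	fmt.Println(level, q.name)
    	for _, c := range q.children {
    		child := c // fresh variable per iteration; &child is now safe
    		walk(&child, level+1)
    	}
    }

    func main() {
    	root := queue{name: "root", children: []queue{{name: "a"}, {name: "b"}}}
    	walk(&root, 0)
    }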
diff --git a/pkg/common/configs/configvalidator_test.go b/pkg/common/configs/configvalidator_test.go
index 82155c28..f584090a 100644
--- a/pkg/common/configs/configvalidator_test.go
+++ b/pkg/common/configs/configvalidator_test.go
@@ -2007,8 +2007,9 @@ func TestCheckLimits(t *testing.T) { //nolint:funlen
}
for _, testCase := range testCases {
+ config := testCase.config
t.Run(testCase.name, func(t *testing.T) {
- err := checkLimits(testCase.config.Limits, testCase.config.Name, &testCase.config)
+ err := checkLimits(testCase.config.Limits, testCase.config.Name, &config)
if testCase.errMsg != "" {
assert.ErrorContains(t, err, testCase.errMsg)
} else {
@@ -2194,7 +2195,6 @@ func TestCheckQueuesStructure(t *testing.T) {
if tc.validateFunc != nil {
tc.validateFunc(t, tc.partition)
}
-
})
}
}
diff --git a/pkg/common/security/usergroup.go b/pkg/common/security/usergroup.go
index f1c8454f..d9a1966c 100644
--- a/pkg/common/security/usergroup.go
+++ b/pkg/common/security/usergroup.go
@@ -71,7 +71,6 @@ type UserGroup struct {
// * NO resolver: default, no user or group resolution just return the info (k8s use case)
// * OS resolver: uses the OS libraries to resolve user and group memberships
// * Test resolver: fake resolution for testing
-// TODO need to make this fully configurable and look at reflection etc
func GetUserGroupCache(resolver string) *UserGroupCache {
once.Do(func() {
switch resolver {
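Note: the deleted TODO here, and the similar TODO/ToDO deletions in the scheduler and webservice files below, are presumably what the godox linter flagged: it reports TODO/FIXME-style markers left in the code.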
diff --git a/pkg/common/security/usergroup_test.go b/pkg/common/security/usergroup_test.go
index 0540fe04..6c0d9f45 100644
--- a/pkg/common/security/usergroup_test.go
+++ b/pkg/common/security/usergroup_test.go
@@ -89,8 +89,11 @@ func TestGetUserGroup(t *testing.T) {
testCache.resetCache()
// test cache should be empty now
assert.Equal(t, 0, testCache.getUGsize(), "Cache is not empty: %v", testCache.getUGmap())
-
- ug, err := testCache.GetUserGroup("testuser1")
+ ugi := &si.UserGroupInformation{
+ User: "testuser1",
+ Groups: nil,
+ }
+ ug, err := testCache.GetUserGroup(ugi.User)
assert.NilError(t, err, "Lookup should not have failed: testuser1")
if ug.failed {
t.Errorf("lookup failed which should not have: %t", ug.failed)
@@ -99,11 +102,11 @@ func TestGetUserGroup(t *testing.T) {
t.Errorf("Cache not updated should have 1 entry %d",
len(testCache.ugs))
}
// check returned info: primary and secondary groups etc
- if ug.User != "testuser1" || len(ug.Groups) != 2 || ug.resolved == 0 ||
ug.failed {
+ if ug.User != ugi.User || len(ug.Groups) != 2 || ug.resolved == 0 ||
ug.failed {
t.Errorf("User 'testuser1' not resolved correctly: %v", ug)
}
testCache.lock.Lock()
- cachedUG := testCache.ugs["testuser1"]
+ cachedUG := testCache.ugs[ugi.User]
if ug.resolved != cachedUG.resolved {
t.Errorf("User 'testuser1' not cached correctly resolution time
differs: %d got %d", ug.resolved, cachedUG.resolved)
}
@@ -111,7 +114,7 @@ func TestGetUserGroup(t *testing.T) {
cachedUG.resolved -= 5
testCache.lock.Unlock()
- ug, err = testCache.GetUserGroup("testuser1")
+ ug, err = testCache.GetUserGroup(ugi.User)
if err != nil || ug.resolved != cachedUG.resolved {
t.Errorf("User 'testuser1' not returned from Cache, resolution
time differs: %d got %d (err = %v)", ug.resolved, cachedUG.resolved, err)
}
@@ -178,16 +181,20 @@ func TestGetUserGroupFail(t *testing.T) {
}
// resolve a non existing user
- ug, err = testCache.GetUserGroup("unknown")
+ ugi := &si.UserGroupInformation{
+ User: "unknown",
+ Groups: nil,
+ }
+ ug, err = testCache.GetUserGroup(ugi.User)
if err == nil {
t.Error("Lookup should have failed: unknown user")
}
// ug is partially filled and failed flag is set
- if ug.User != "unknown" || len(ug.Groups) != 0 || !ug.failed {
+ if ug.User != ugi.User || len(ug.Groups) != 0 || !ug.failed {
t.Errorf("UserGroup is not empty: %v", ug)
}
- ug, err = testCache.GetUserGroup("unknown")
+ ug, err = testCache.GetUserGroup(ugi.User)
if err == nil {
t.Error("Lookup should have failed: unknown user")
}
diff --git a/pkg/common/security/usergroup_test_resolver.go b/pkg/common/security/usergroup_test_resolver.go
index b22b60b2..c35dd6bb 100644
--- a/pkg/common/security/usergroup_test_resolver.go
+++ b/pkg/common/security/usergroup_test_resolver.go
@@ -25,6 +25,12 @@ import (
"time"
)
+const (
+ Testuser1 = "testuser1"
+ Testuser2 = "testuser2"
+ Testuser3 = "testuser3"
+)
+
// Get the cache with a test resolver
// cleaner runs every second
func GetUserGroupCacheTest() *UserGroupCache {
@@ -41,7 +47,7 @@ func GetUserGroupCacheTest() *UserGroupCache {
// test function only
func lookup(userName string) (*user.User, error) {
// 1st test user: all OK
- if userName == "testuser1" {
+ if userName == Testuser1 {
return &user.User{
Uid: "1000",
Gid: "1000",
@@ -49,14 +55,14 @@ func lookup(userName string) (*user.User, error) {
}, nil
}
// 2nd test user: primary group does not resolve
- if userName == "testuser2" {
+ if userName == Testuser2 {
return &user.User{
Uid: "100",
Gid: "100",
Username: "testuser2",
}, nil
}
- if userName == "testuser3" {
+ if userName == Testuser3 {
return &user.User{
Uid: "1001",
Gid: "1001",
@@ -106,14 +112,14 @@ func lookupGroupID(gid string) (*user.Group, error) {
// test function only
func groupIds(osUser *user.User) ([]string, error) {
- if osUser.Username == "testuser1" {
+ if osUser.Username == Testuser1 {
return []string{"1001"}, nil
}
- if osUser.Username == "testuser2" {
+ if osUser.Username == Testuser2 {
return []string{"1001", "1002"}, nil
}
// group list might return primary group ID also
- if osUser.Username == "testuser3" {
+ if osUser.Username == Testuser3 {
return []string{"1002", "1001", "1003", "1004"}, nil
}
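Note: hoisting the repeated user names into the Testuser1..Testuser3 constants looks like a goconst fix: that linter flags a string literal once it recurs often enough to deserve a constant. A minimal sketch of the shape (illustrative names only):

    package main

    import "fmt"

    // One definition instead of the same literal scattered across branches,
    // which is what goconst complains about.
    const testUser1 = "testuser1"

    func uid(name string) string {
    	if name == testUser1 {
    		return "1000"
    	}
    	return ""
    }

    func main() {
    	fmt.Println(uid(testUser1)) // prints 1000
    }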
diff --git a/pkg/events/event_system.go b/pkg/events/event_system.go
index 2702d57c..b6fc2b26 100644
--- a/pkg/events/event_system.go
+++ b/pkg/events/event_system.go
@@ -35,7 +35,6 @@ import (
// need to change for testing
var defaultEventChannelSize = 100000
-var defaultRingBufferSize uint64 = 100000
var once sync.Once
var ev EventSystem
diff --git a/pkg/scheduler/context.go b/pkg/scheduler/context.go
index 39715b44..f5329c22 100644
--- a/pkg/scheduler/context.go
+++ b/pkg/scheduler/context.go
@@ -555,7 +555,6 @@ func (cc *ClusterContext) handleRMUpdateApplicationEvent(event *rmevent.RMUpdate
// Update metrics with removed applications
if len(request.Remove) > 0 {
metrics.GetSchedulerMetrics().SubTotalApplicationsRunning(len(request.Remove))
- // ToDO: need to improve this once we have state in YuniKorn for apps.
metrics.GetSchedulerMetrics().AddTotalApplicationsCompleted(len(request.Remove))
for _, app := range request.Remove {
partition := cc.GetPartition(app.PartitionName)
diff --git a/pkg/scheduler/objects/allocation_ask.go b/pkg/scheduler/objects/allocation_ask.go
index 09be0acb..62748de9 100644
--- a/pkg/scheduler/objects/allocation_ask.go
+++ b/pkg/scheduler/objects/allocation_ask.go
@@ -87,7 +87,6 @@ func NewAllocationAsk(allocationKey string, applicationID string, allocatedResou
}
func NewAllocationAskFromSI(ask *si.AllocationAsk) *AllocationAsk {
-
var createTime time.Time
siCreationTime, err := strconv.ParseInt(ask.Tags[siCommon.CreationTime], 10, 64)
if err != nil {
diff --git a/pkg/scheduler/objects/allocation_ask_test.go b/pkg/scheduler/objects/allocation_ask_test.go
index 6a97b041..6f6c75d1 100644
--- a/pkg/scheduler/objects/allocation_ask_test.go
+++ b/pkg/scheduler/objects/allocation_ask_test.go
@@ -126,6 +126,7 @@ func TestPlaceHolder(t *testing.T) {
ask := NewAllocationAskFromSI(siAsk)
assert.Assert(t, !ask.IsPlaceholder(), "standard ask should not be a placeholder")
assert.Equal(t, ask.GetTaskGroup(), "", "standard ask should not have a TaskGroupName")
+
siAsk = &si.AllocationAsk{
AllocationKey: "ask1",
ApplicationID: "app1",
@@ -136,11 +137,12 @@ func TestPlaceHolder(t *testing.T) {
ask = NewAllocationAskFromSI(siAsk)
var nilAsk *AllocationAsk
assert.Equal(t, ask, nilAsk, "placeholder ask created without a TaskGroupName")
- siAsk.TaskGroupName = "testgroup"
+
+ siAsk.TaskGroupName = "TestPlaceHolder"
ask = NewAllocationAskFromSI(siAsk)
assert.Assert(t, ask != nilAsk, "placeholder ask creation failed unexpectedly")
assert.Assert(t, ask.IsPlaceholder(), "ask should have been a placeholder")
- assert.Equal(t, ask.GetTaskGroup(), "testgroup", "TaskGroupName not set as expected")
+ assert.Equal(t, ask.GetTaskGroup(), siAsk.TaskGroupName, "TaskGroupName not set as expected")
}
func TestGetRequiredNode(t *testing.T) {
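Note: same motivation in this test and the allocation test below: replacing the repeated "testgroup" literal with the test's own name and asserting against the fixture field leaves no duplicated literal for goconst to flag, and the assertion can no longer drift from the fixture value.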
diff --git a/pkg/scheduler/objects/allocation_test.go b/pkg/scheduler/objects/allocation_test.go
index 2f2833dc..9a1964b7 100644
--- a/pkg/scheduler/objects/allocation_test.go
+++ b/pkg/scheduler/objects/allocation_test.go
@@ -238,12 +238,12 @@ func TestNewAllocFromSI(t *testing.T) {
var nilAlloc *Allocation
alloc := NewAllocationFromSI(allocSI)
assert.Equal(t, alloc, nilAlloc, "placeholder allocation created without a TaskGroupName")
- allocSI.TaskGroupName = "testgroup"
+ allocSI.TaskGroupName = "TestNewAllocFromSI"
alloc = NewAllocationFromSI(allocSI)
assert.Assert(t, alloc != nilAlloc, "placeholder ask creation failed unexpectedly")
assert.Assert(t, alloc.GetAsk().IsPlaceholder(), "ask should have been a placeholder")
assert.Assert(t, alloc.IsPlaceholder(), "allocation should have been a placeholder")
- assert.Equal(t, alloc.GetTaskGroup(), "testgroup", "TaskGroupName not set as expected")
+ assert.Equal(t, alloc.GetTaskGroup(), allocSI.TaskGroupName, "TaskGroupName not set as expected")
assert.Equal(t, alloc.GetAsk().GetCreateTime(), time.Unix(past, 0)) //nolint:staticcheck
assert.Assert(t, alloc.GetAsk().IsOriginator(), "ask should have been an originator")
assert.Assert(t, alloc.IsOriginator(), "allocation should have been an originator")
diff --git a/pkg/scheduler/objects/application.go b/pkg/scheduler/objects/application.go
index b15e8e80..b6db324c 100644
--- a/pkg/scheduler/objects/application.go
+++ b/pkg/scheduler/objects/application.go
@@ -1445,7 +1445,6 @@ func (sa *Application) tryNodes(ask *AllocationAsk, iterator NodeIterator) *Allo
return false
}
// nothing allocated should we look at a reservation?
- // TODO make this smarter a hardcoded delay is not the right thing
askAge := time.Since(ask.GetCreateTime())
if allowReserve && askAge > reservationDelay {
log.Log(log.SchedApplication).Debug("app reservation check",
diff --git a/pkg/scheduler/objects/node_test.go b/pkg/scheduler/objects/node_test.go
index c3daf2a2..274fb8a0 100644
--- a/pkg/scheduler/objects/node_test.go
+++ b/pkg/scheduler/objects/node_test.go
@@ -107,8 +107,6 @@ func TestCheckConditions(t *testing.T) {
if !node.preAllocateConditions(ask) {
t.Error("node with scheduling set to true no plugins should
allow allocation")
}
-
- // TODO add mock for plugin to extend tests
}
func TestPreAllocateCheck(t *testing.T) {
diff --git a/pkg/scheduler/partition.go b/pkg/scheduler/partition.go
index ed4b03bb..c7677b16 100644
--- a/pkg/scheduler/partition.go
+++ b/pkg/scheduler/partition.go
@@ -127,7 +127,6 @@ func (pc *PartitionContext) initialPartitionFromConfig(conf configs.PartitionCon
// Placing an application will not have a lock on the partition context.
pc.placementManager = placement.NewPlacementManager(conf.PlacementRules, pc.GetQueue)
// get the user group cache for the partition
- // TODO get the resolver from the config
pc.userGroupCache = security.GetUserGroupCache("")
pc.updateNodeSortingPolicy(conf)
pc.updatePreemption(conf)
diff --git a/pkg/scheduler/partition_manager.go b/pkg/scheduler/partition_manager.go
index 14e19465..50427826 100644
--- a/pkg/scheduler/partition_manager.go
+++ b/pkg/scheduler/partition_manager.go
@@ -123,7 +123,6 @@ func (manager *partitionManager) cleanQueues(queue *objects.Queue) {
zap.String("queue", queue.QueuePath))
}
} else {
- // TODO time out waiting for draining and removal
log.Log(log.SchedPartition).Debug("skip removing the queue",
zap.String("reason", "there are existing assigned apps or leaf queues"),
zap.String("queue", queue.QueuePath),
diff --git a/pkg/scheduler/placement/filter_test.go b/pkg/scheduler/placement/filter_test.go
index ddda0452..2ef755cb 100644
--- a/pkg/scheduler/placement/filter_test.go
+++ b/pkg/scheduler/placement/filter_test.go
@@ -316,68 +316,63 @@ func TestFilterGroup(t *testing.T) {
// test allowing user access with user list
func TestAllowUser(t *testing.T) {
// user object to test with
- userObj := security.UserGroup{
- User: "",
+ user1Obj := security.UserGroup{
+ User: "user1",
+ Groups: nil,
+ }
+ user2Obj := security.UserGroup{
+ User: "user2",
Groups: nil,
}
// test deny user list
conf := configs.Filter{}
conf.Type = filterDeny
- conf.Users = []string{"user1"}
-
+ conf.Users = []string{user1Obj.User}
filter := newFilter(conf)
- userObj.User = "user1"
- if filter.allowUser(userObj) {
+ if filter.allowUser(user1Obj) {
t.Error("deny filter did not deny user 'user1' while in list")
}
- userObj.User = "user2"
- if !filter.allowUser(userObj) {
+ if !filter.allowUser(user2Obj) {
t.Error("deny filter did deny user 'user2' while not in list")
}
// test allow user list
conf = configs.Filter{}
conf.Type = filterAllow
- conf.Users = []string{"user1"}
+ conf.Users = []string{user1Obj.User}
filter = newFilter(conf)
- userObj.User = "user1"
- if !filter.allowUser(userObj) {
+ if !filter.allowUser(user1Obj) {
t.Error("allow filter did not allow user 'user1' while in list")
}
- userObj.User = "user2"
- if filter.allowUser(userObj) {
- t.Error("allow filter did allow user 'user1' while not in list")
+ if filter.allowUser(user2Obj) {
+ t.Error("allow filter did allow user 'user2' while not in list")
}
// test deny user exp
conf = configs.Filter{}
conf.Type = filterDeny
- conf.Users = []string{"user[0-9]"}
+ conf.Users = []string{"user[0-1]"}
filter = newFilter(conf)
- userObj.User = "user1"
- if filter.allowUser(userObj) {
+ if filter.allowUser(user1Obj) {
t.Error("deny filter did not deny user 'user1' while in
expression")
}
- userObj.User = "nomatch"
- if !filter.allowUser(userObj) {
- t.Error("deny filter did deny user 'nomatch' while not in
expression")
+ if !filter.allowUser(user2Obj) {
+ t.Error("deny filter did deny user 'user2' while not in
expression")
}
// test allow user exp
conf = configs.Filter{}
conf.Type = filterAllow
- conf.Users = []string{"user[0-9]"}
+ conf.Users = []string{"user[0-1]"}
filter = newFilter(conf)
- userObj.User = "user1"
- if !filter.allowUser(userObj) {
+ if !filter.allowUser(user1Obj) {
t.Error("allow filter did not allow user 'user1' while in
expression")
}
- userObj.User = "nomatch"
- if filter.allowUser(userObj) {
- t.Error("allow filter did allow user 'nomatch' while not in
expression")
+ if filter.allowUser(user2Obj) {
+ t.Error("allow filter did allow user 'user2' while not in
expression")
}
}
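Note: beyond swapping the single mutable userObj for two fixed users, the rewrite narrows the deny/allow expressions to user[0-1] so that user2 is a genuine non-match, and it corrects a copy-paste slip: the old allow-filter message complained about 'user1' when 'user2' was the user under test.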
diff --git a/pkg/scheduler/placement/fixed_rule_test.go b/pkg/scheduler/placement/fixed_rule_test.go
index 22bf025f..078d3bc3 100644
--- a/pkg/scheduler/placement/fixed_rule_test.go
+++ b/pkg/scheduler/placement/fixed_rule_test.go
@@ -61,17 +61,7 @@ func TestFixedRule(t *testing.T) {
}
func TestFixedRulePlace(t *testing.T) {
- // Create the structure for the test
- data := `
-partitions:
- - name: default
- queues:
- - name: testqueue
- - name: testparent
- queues:
- - name: testchild
-`
- err := initQueueStructure([]byte(data))
+ err := initQueueStructure([]byte(confTestQueue))
assert.NilError(t, err, "setting up the queue config failed")
user := security.UserGroup{
diff --git a/pkg/scheduler/placement/placement_test.go b/pkg/scheduler/placement/placement_test.go
index 5db70118..9ea73a78 100644
--- a/pkg/scheduler/placement/placement_test.go
+++ b/pkg/scheduler/placement/placement_test.go
@@ -193,7 +193,7 @@ func TestManagerBuildRule(t *testing.T) {
t.Errorf("test rule build should not have failed and created 2
top level rule, err: %v, rules: %v", err, ruleObjs)
} else {
parent := ruleObjs[0].getParent()
- if parent == nil || parent.getName() != "test" {
+ if parent == nil || parent.getName() != rules[0].Name {
t.Error("test rule build should have created 2 rules:
parent not found")
}
}
@@ -206,7 +206,7 @@ func TestManagerBuildRule(t *testing.T) {
ruleObjs, err = buildRules(rules)
if err != nil || len(ruleObjs) != 3 {
t.Errorf("rule build should not have failed and created 3
rules, err: %v, rules: %v", err, ruleObjs)
- } else if ruleObjs[0].getName() != "user" || ruleObjs[1].getName() !=
"test" || ruleObjs[2].getName() != "recovery" {
+ } else if ruleObjs[0].getName() != rules[0].Name ||
ruleObjs[1].getName() != rules[1].Name || ruleObjs[2].getName() != "recovery" {
t.Errorf("rule build order is not preserved: %v", ruleObjs)
}
}
@@ -260,9 +260,8 @@ partitions:
// user rule existing queue, acl allowed
err = man.PlaceApplication(app)
queueName := app.GetQueuePath()
- if err != nil || queueName != "root.testparent.testchild" {
- t.Errorf("leaf exist: app should have been placed in user
queue, queue: '%s', error: %v", queueName, err)
- }
+ assert.NilError(t, err)
+ assert.Equal(t, "root.testparent.testchild", queueName)
user = security.UserGroup{
User: "other-user",
Groups: []string{},
@@ -326,6 +325,7 @@ partitions:
}
}
+//nolint:funlen
func TestForcePlaceApp(t *testing.T) {
const (
provided = "provided"
diff --git a/pkg/scheduler/placement/provided_rule_test.go b/pkg/scheduler/placement/provided_rule_test.go
index cc2f8c77..b1ef26e0 100644
--- a/pkg/scheduler/placement/provided_rule_test.go
+++ b/pkg/scheduler/placement/provided_rule_test.go
@@ -119,9 +119,8 @@ partitions:
// unqualified queue with parent rule that exists directly in hierarchy
appInfo = newApplication("app1", "default", "testchild", user, tags,
nil, "")
queue, err = pr.placeApplication(appInfo, queueFunc)
- if queue != "root.testparent.testchild" || err != nil {
- t.Errorf("provided rule failed to place queue in correct queue
'%s', err %v", queue, err)
- }
+ assert.NilError(t, err)
+ assert.Equal(t, "root.testparent.testchild", queue)
// qualified queue with parent rule (parent rule ignored)
appInfo = newApplication("app1", "default", "root.testparent", user,
tags, nil, "")
diff --git a/pkg/scheduler/placement/recovery_rule_test.go b/pkg/scheduler/placement/recovery_rule_test.go
index 61c0d443..fca15a6f 100644
--- a/pkg/scheduler/placement/recovery_rule_test.go
+++ b/pkg/scheduler/placement/recovery_rule_test.go
@@ -41,18 +41,7 @@ func TestRecoveryRuleInitialise(t *testing.T) {
func TestRecoveryRulePlace(t *testing.T) {
rr := &recoveryRule{}
-
- // Create the structure for the test
- data := `
-partitions:
- - name: default
- queues:
- - name: testqueue
- - name: testparent
- queues:
- - name: testchild
-`
- err := initQueueStructure([]byte(data))
+ err := initQueueStructure([]byte(confTestQueue))
assert.NilError(t, err, "setting up the queue config failed")
// verify that non-forced app is not recovered
diff --git a/pkg/scheduler/placement/rule.go b/pkg/scheduler/placement/rule.go
index cdc542e1..7d23ed30 100644
--- a/pkg/scheduler/placement/rule.go
+++ b/pkg/scheduler/placement/rule.go
@@ -72,12 +72,14 @@ func (r *basicRule) getParent() rule {
return r.parent
}
+const unnamedRuleName = "unnamed rule"
+
// getName returns the name if not overwritten by the rule.
// Marked as nolint as rules should override this.
//
//nolint:unused
func (r *basicRule) getName() string {
- return "unnamed rule"
+ return unnamedRuleName
}
// ruleDAO returns the RuleDAO object if not overwritten by the rule.
diff --git a/pkg/scheduler/placement/rule_test.go b/pkg/scheduler/placement/rule_test.go
index 308f4571..61f31155 100644
--- a/pkg/scheduler/placement/rule_test.go
+++ b/pkg/scheduler/placement/rule_test.go
@@ -118,12 +118,12 @@ func TestReplaceDot(t *testing.T) {
func TestBasicRule(t *testing.T) {
rule := &basicRule{}
- if name := rule.getName(); name != "unnamed rule" {
+ if name := rule.getName(); name != unnamedRuleName {
t.Errorf("expected %s, got %s", "unnamed rule", name)
}
dao := rule.ruleDAO()
- if dao.Name != "unnamed rule" {
+ if dao.Name != unnamedRuleName {
t.Errorf("expected %s, got %s", "unnamed rule", dao.Name)
}
}
diff --git a/pkg/scheduler/placement/tag_rule_test.go b/pkg/scheduler/placement/tag_rule_test.go
index 5d87a5ac..bf9a56fb 100644
--- a/pkg/scheduler/placement/tag_rule_test.go
+++ b/pkg/scheduler/placement/tag_rule_test.go
@@ -62,17 +62,7 @@ func TestTagRule(t *testing.T) {
//nolint:funlen
func TestTagRulePlace(t *testing.T) {
- // Create the structure for the test
- data := `
-partitions:
- - name: default
- queues:
- - name: testqueue
- - name: testparent
- queues:
- - name: testchild
-`
- err := initQueueStructure([]byte(data))
+ err := initQueueStructure([]byte(confTestQueue))
assert.NilError(t, err, "setting up the queue config failed")
user := security.UserGroup{
diff --git a/pkg/scheduler/placement/testrule_test.go b/pkg/scheduler/placement/testrule_test.go
index 8ca7e6a8..38bc24fe 100644
--- a/pkg/scheduler/placement/testrule_test.go
+++ b/pkg/scheduler/placement/testrule_test.go
@@ -43,6 +43,17 @@ partitions:
`
const nameParentChild = "root.testparentnew.testchild"
+// Create the structure for the test
+const confTestQueue = `
+partitions:
+ - name: default
+ queues:
+ - name: testqueue
+ - name: testparent
+ queues:
+ - name: testchild
+`
+
var root *objects.Queue
// Mocked up function to mimic the partition getQueue function
diff --git a/pkg/scheduler/tests/reservation_test.go b/pkg/scheduler/tests/reservation_test.go
index 3cd59c20..442d94ea 100644
--- a/pkg/scheduler/tests/reservation_test.go
+++ b/pkg/scheduler/tests/reservation_test.go
@@ -77,6 +77,7 @@ partitions:
preemption:
enabled: false
`
+ queueName = "root.leaf-1"
)
// simple reservation one app one queue
@@ -95,7 +96,6 @@ func TestBasicReservation(t *testing.T) {
nodes := createNodes(t, ms, 2, 50000000, 50000)
ms.mockRM.waitForMinAcceptedNodes(t, 2, 1000)
- queueName := "root.leaf-1"
err = ms.addApp(appID1, queueName, "default")
assert.NilError(t, err, "adding app to scheduler failed")
@@ -171,8 +171,7 @@ func TestReservationForTwoQueues(t *testing.T) {
ms.mockRM.waitForMinAcceptedNodes(t, 2, 1000)
// add the first scheduling app
- leaf1Name := "root.leaf-1"
- err = ms.addApp(appID1, leaf1Name, "default")
+ err = ms.addApp(appID1, queueName, "default")
assert.NilError(t, err, "adding app 1 to scheduler failed")
ms.mockRM.waitForAcceptedApplication(t, appID1, 1000)
@@ -274,7 +273,6 @@ func TestRemoveReservedNode(t *testing.T) {
nodes := createNodes(t, ms, 2, 50000000, 50000)
ms.mockRM.waitForMinAcceptedNodes(t, 2, 1000)
- queueName := "root.leaf-1"
err = ms.addApp(appID1, queueName, "default")
assert.NilError(t, err, "adding app to scheduler failed")
@@ -327,7 +325,6 @@ func TestAddNewNode(t *testing.T) {
ms.mockRM.waitForMinAcceptedNodes(t, 3, 1000)
ms.scheduler.GetClusterContext().GetPartition(ms.partitionName).GetNode(nodes[2]).SetSchedulable(false)
- queueName := "root.leaf-1"
err = ms.addApp(appID1, queueName, "default")
assert.NilError(t, err, "adding app to scheduler failed")
@@ -384,7 +381,6 @@ func TestUnReservationAndDeletion(t *testing.T) {
nodes := createNodes(t, ms, 2, 30000000, 30000)
ms.mockRM.waitForMinAcceptedNodes(t, 2, 1000)
- queueName := "root.leaf-1"
err = ms.addApp(appID1, queueName, "default")
assert.NilError(t, err, "adding app to scheduler failed")
ms.mockRM.waitForAcceptedApplication(t, appID1, 1000)
diff --git a/pkg/scheduler/tests/smoke_test.go b/pkg/scheduler/tests/smoke_test.go
index dff62e3f..546e8ed6 100644
--- a/pkg/scheduler/tests/smoke_test.go
+++ b/pkg/scheduler/tests/smoke_test.go
@@ -33,19 +33,22 @@ import (
"github.com/apache/yunikorn-scheduler-interface/lib/go/si"
)
-const configDataSmokeTest = `
+const (
+ configDataSmokeTest = `
partitions:
- name: default
queues:
- name: root
submitacl: "*"
queues:
- - name: singleleaf
+ - name: leaf
resources:
max:
memory: 150M
vcore: 20
`
+ leafName = "root.leaf"
+)
// Test scheduler reconfiguration
func TestConfigScheduler(t *testing.T) {
@@ -168,7 +171,6 @@ func TestBasicScheduler(t *testing.T) {
err := ms.Init(configDataSmokeTest, false, false)
assert.NilError(t, err, "RegisterResourceManager failed")
- leafName := "root.singleleaf"
// Check queues of cache and scheduler.
part := ms.scheduler.GetClusterContext().GetPartition(partition)
assert.Assert(t, part.GetTotalPartitionResource() == nil, "partition info max resource nil")
@@ -417,7 +419,6 @@ func TestBasicSchedulerAutoAllocation(t *testing.T) {
err := ms.Init(configDataSmokeTest, true, false)
assert.NilError(t, err, "RegisterResourceManager failed")
- leafName := "root.singleleaf"
appID := appID1
// Register a node, and add apps
@@ -639,7 +640,6 @@ partitions:
err := ms.Init(configData, false, false)
assert.NilError(t, err, "RegisterResourceManager failed")
- leafName := "root.leaf"
app1ID := appID1
app2ID := appID2
@@ -1293,7 +1293,6 @@ func TestDupReleasesInGangScheduling(t *testing.T) {
err := ms.Init(configDataSmokeTest, false, false)
assert.NilError(t, err, "RegisterResourceManager failed")
- leafName := "root.singleleaf"
part := ms.scheduler.GetClusterContext().GetPartition(partition)
root := part.GetQueue("root")
leaf := part.GetQueue(leafName)
diff --git a/pkg/scheduler/ugm/group_tracker.go b/pkg/scheduler/ugm/group_tracker.go
index ffc0292f..d6ec777f 100644
--- a/pkg/scheduler/ugm/group_tracker.go
+++ b/pkg/scheduler/ugm/group_tracker.go
@@ -137,13 +137,6 @@ func (gt *GroupTracker) canBeRemoved() bool {
return gt.queueTracker.canBeRemoved()
}
-func (gt *GroupTracker) getName() string {
- if gt == nil {
- return common.Empty
- }
- return gt.groupName
-}
-
func (gt *GroupTracker) decreaseAllTrackedResourceUsage(hierarchy []string) map[string]string {
if gt == nil {
return nil
diff --git a/pkg/webservice/handler_mock_test.go b/pkg/webservice/handler_mock_test.go
index 8f09b807..1f93c730 100644
--- a/pkg/webservice/handler_mock_test.go
+++ b/pkg/webservice/handler_mock_test.go
@@ -1,19 +1,19 @@
/*
- Licensed to the Apache Software Foundation (ASF) under one
- or more contributor license agreements. See the NOTICE file
- distributed with this work for additional information
- regarding copyright ownership. The ASF licenses this file
- to you under the Apache License, Version 2.0 (the
- "License"); you may not use this file except in compliance
- with the License. You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
*/
package webservice
diff --git a/pkg/webservice/handlers_test.go b/pkg/webservice/handlers_test.go
index 2231ff98..619d9b56 100644
--- a/pkg/webservice/handlers_test.go
+++ b/pkg/webservice/handlers_test.go
@@ -666,31 +666,27 @@ func TestGetNodesUtilJSON(t *testing.T) {
partition := setup(t, configDefault, 1)
// create test application
- appID := "app1"
- app := newApplication(appID, partition.Name, queueName, rmID, security.UserGroup{})
+ app := newApplication("app1", partition.Name, queueName, rmID, security.UserGroup{})
err := partition.AddApplication(app)
assert.NilError(t, err, "add application to partition should not have failed")
// create test nodes
nodeRes := resources.NewResourceFromMap(map[string]resources.Quantity{siCommon.Memory: 1000, siCommon.CPU: 1000}).ToProto()
- node1ID := "node-1"
- node1 := objects.NewNode(&si.NodeInfo{NodeID: node1ID, SchedulableResource: nodeRes})
- node2ID := "node-2"
+ node1 := objects.NewNode(&si.NodeInfo{NodeID: "node-1", SchedulableResource: nodeRes})
nodeRes2 := resources.NewResourceFromMap(map[string]resources.Quantity{siCommon.Memory: 1000, siCommon.CPU: 1000, "GPU": 10}).ToProto()
- node2 := objects.NewNode(&si.NodeInfo{NodeID: node2ID, SchedulableResource: nodeRes2})
- node3ID := "node-3"
+ node2 := objects.NewNode(&si.NodeInfo{NodeID: "node-2", SchedulableResource: nodeRes2})
nodeCPU := resources.NewResourceFromMap(map[string]resources.Quantity{siCommon.CPU: 1000}).ToProto()
- node3 := objects.NewNode(&si.NodeInfo{NodeID: node3ID, SchedulableResource: nodeCPU})
+ node3 := objects.NewNode(&si.NodeInfo{NodeID: "node-3", SchedulableResource: nodeCPU})
// create test allocations
resAlloc1 := resources.NewResourceFromMap(map[string]resources.Quantity{siCommon.Memory: 500, siCommon.CPU: 300})
resAlloc2 := resources.NewResourceFromMap(map[string]resources.Quantity{siCommon.Memory: 300, siCommon.CPU: 500, "GPU": 5})
- ask1 := objects.NewAllocationAsk("alloc-1", appID, resAlloc1)
- ask2 := objects.NewAllocationAsk("alloc-2", appID, resAlloc2)
- allocs := []*objects.Allocation{objects.NewAllocation(node1ID, ask1)}
+ ask1 := objects.NewAllocationAsk("alloc-1", app.ApplicationID, resAlloc1)
+ ask2 := objects.NewAllocationAsk("alloc-2", app.ApplicationID, resAlloc2)
+ allocs := []*objects.Allocation{objects.NewAllocation(node1.NodeID, ask1)}
err = partition.AddNode(node1, allocs)
assert.NilError(t, err, "add node to partition should not have failed")
- allocs = []*objects.Allocation{objects.NewAllocation(node2ID, ask2)}
+ allocs = []*objects.Allocation{objects.NewAllocation(node2.NodeID, ask2)}
err = partition.AddNode(node2, allocs)
assert.NilError(t, err, "add node to partition should not have failed")
err = partition.AddNode(node3, nil)
@@ -702,26 +698,26 @@ func TestGetNodesUtilJSON(t *testing.T) {
assert.Equal(t, result.ResourceType, siCommon.Memory)
assert.Equal(t, subResult[2].NumOfNodes, int64(1))
assert.Equal(t, subResult[4].NumOfNodes, int64(1))
- assert.Equal(t, subResult[2].NodeNames[0], node2ID)
- assert.Equal(t, subResult[4].NodeNames[0], node1ID)
+ assert.Equal(t, subResult[2].NodeNames[0], node2.NodeID)
+ assert.Equal(t, subResult[4].NodeNames[0], node1.NodeID)
// three nodes advertise cpu: must show up in the list
result = getNodesUtilJSON(partition, siCommon.CPU)
subResult = result.NodesUtil
assert.Equal(t, result.ResourceType, siCommon.CPU)
assert.Equal(t, subResult[0].NumOfNodes, int64(1))
- assert.Equal(t, subResult[0].NodeNames[0], node3ID)
+ assert.Equal(t, subResult[0].NodeNames[0], node3.NodeID)
assert.Equal(t, subResult[2].NumOfNodes, int64(1))
- assert.Equal(t, subResult[2].NodeNames[0], node1ID)
+ assert.Equal(t, subResult[2].NodeNames[0], node1.NodeID)
assert.Equal(t, subResult[4].NumOfNodes, int64(1))
- assert.Equal(t, subResult[4].NodeNames[0], node2ID)
+ assert.Equal(t, subResult[4].NodeNames[0], node2.NodeID)
// one node advertise GPU: must show up in the list
result = getNodesUtilJSON(partition, "GPU")
subResult = result.NodesUtil
assert.Equal(t, result.ResourceType, "GPU")
assert.Equal(t, subResult[4].NumOfNodes, int64(1))
- assert.Equal(t, subResult[4].NodeNames[0], node2ID)
+ assert.Equal(t, subResult[4].NodeNames[0], node2.NodeID)
result = getNodesUtilJSON(partition, "non-exist")
subResult = result.NodesUtil
@@ -753,10 +749,8 @@ func TestGetNodeUtilisation(t *testing.T) {
assert.Assert(t, confirmNodeCount(utilisation.NodesUtil, 0), "unexpected number of nodes returned should be 0")
// create test nodes
- node1ID := "node-1"
- node2ID := "node-2"
- node1 := addNode(t, partition, node1ID, resources.NewResourceFromMap(map[string]resources.Quantity{"first": 10}))
- node2 := addNode(t, partition, node2ID, resources.NewResourceFromMap(map[string]resources.Quantity{"first": 10, "second": 5}))
+ node1 := addNode(t, partition, "node-1", resources.NewResourceFromMap(map[string]resources.Quantity{"first": 10}))
+ node2 := addNode(t, partition, "node-2", resources.NewResourceFromMap(map[string]resources.Quantity{"first": 10, "second": 5}))
// get nodes utilization
resp = &MockResponseWriter{}
@@ -770,7 +764,7 @@ func TestGetNodeUtilisation(t *testing.T) {
resAlloc := resources.NewResourceFromMap(map[string]resources.Quantity{"first": 10})
ask := objects.NewAllocationAsk("alloc-1", "app", resAlloc)
- alloc := objects.NewAllocation(node1ID, ask)
+ alloc := objects.NewAllocation(node1.NodeID, ask)
assert.Assert(t, node1.AddAllocation(alloc), "unexpected failure adding allocation to node")
rootQ := partition.GetQueue("root")
err = rootQ.IncAllocatedResource(resAlloc, false)
@@ -788,7 +782,7 @@ func TestGetNodeUtilisation(t *testing.T) {
// make second type dominant by using all
resAlloc = resources.NewResourceFromMap(map[string]resources.Quantity{"second": 5})
ask = objects.NewAllocationAsk("alloc-2", "app", resAlloc)
- alloc = objects.NewAllocation(node2ID, ask)
+ alloc = objects.NewAllocation(node2.NodeID, ask)
assert.Assert(t, node2.AddAllocation(alloc), "unexpected failure adding allocation to node")
err = rootQ.IncAllocatedResource(resAlloc, false)
assert.NilError(t, err, "unexpected error returned setting allocated resource on queue")
@@ -840,14 +834,11 @@ func TestGetPartitionNodesUtilJSON(t *testing.T) {
// setup
partition := setup(t, configDefault, 1)
appID := "app1"
- node1ID := "node-1"
- node2ID := "node-2"
- node3ID := "node-3"
// create test nodes
- node1 := addNode(t, partition, node1ID, resources.NewResourceFromMap(map[string]resources.Quantity{siCommon.Memory: 1000, siCommon.CPU: 1000}))
- node2 := addNode(t, partition, node2ID, resources.NewResourceFromMap(map[string]resources.Quantity{siCommon.Memory: 1000, siCommon.CPU: 1000, "GPU": 10}))
- addNode(t, partition, node3ID, resources.NewResourceFromMap(map[string]resources.Quantity{siCommon.CPU: 1000}))
+ node1 := addNode(t, partition, "node-1", resources.NewResourceFromMap(map[string]resources.Quantity{siCommon.Memory: 1000, siCommon.CPU: 1000}))
+ node2 := addNode(t, partition, "node-2", resources.NewResourceFromMap(map[string]resources.Quantity{siCommon.Memory: 1000, siCommon.CPU: 1000, "GPU": 10}))
+ node3 := addNode(t, partition, "node-3", resources.NewResourceFromMap(map[string]resources.Quantity{siCommon.CPU: 1000}))
// create test allocations
addAllocatedResource(t, node1, "alloc-1", appID, map[string]resources.Quantity{siCommon.Memory: 500, siCommon.CPU: 300})
@@ -863,22 +854,22 @@ func TestGetPartitionNodesUtilJSON(t *testing.T) {
memoryNodesUtil := getNodesUtilByType(t, result.NodesUtilList, siCommon.Memory)
assert.Equal(t, memoryNodesUtil.NodesUtil[2].NumOfNodes, int64(1))
assert.Equal(t, memoryNodesUtil.NodesUtil[4].NumOfNodes, int64(1))
- assert.Equal(t, memoryNodesUtil.NodesUtil[2].NodeNames[0], node2ID)
- assert.Equal(t, memoryNodesUtil.NodesUtil[4].NodeNames[0], node1ID)
+ assert.Equal(t, memoryNodesUtil.NodesUtil[2].NodeNames[0], node2.NodeID)
+ assert.Equal(t, memoryNodesUtil.NodesUtil[4].NodeNames[0], node1.NodeID)
// three nodes advertise cpu: must show up in the list
cpuNodesUtil := getNodesUtilByType(t, result.NodesUtilList, siCommon.CPU)
assert.Equal(t, cpuNodesUtil.NodesUtil[0].NumOfNodes, int64(1))
- assert.Equal(t, cpuNodesUtil.NodesUtil[0].NodeNames[0], node3ID)
+ assert.Equal(t, cpuNodesUtil.NodesUtil[0].NodeNames[0], node3.NodeID)
assert.Equal(t, cpuNodesUtil.NodesUtil[2].NumOfNodes, int64(1))
- assert.Equal(t, cpuNodesUtil.NodesUtil[2].NodeNames[0], node1ID)
+ assert.Equal(t, cpuNodesUtil.NodesUtil[2].NodeNames[0], node1.NodeID)
assert.Equal(t, cpuNodesUtil.NodesUtil[4].NumOfNodes, int64(1))
- assert.Equal(t, cpuNodesUtil.NodesUtil[4].NodeNames[0], node2ID)
+ assert.Equal(t, cpuNodesUtil.NodesUtil[4].NodeNames[0], node2.NodeID)
// one node advertise GPU: must show up in the list
gpuNodesUtil := getNodesUtilByType(t, result.NodesUtilList, "GPU")
assert.Equal(t, gpuNodesUtil.NodesUtil[4].NumOfNodes, int64(1))
- assert.Equal(t, gpuNodesUtil.NodesUtil[4].NodeNames[0], node2ID)
+ assert.Equal(t, gpuNodesUtil.NodesUtil[4].NodeNames[0], node2.NodeID)
}
func TestGetNodeUtilisations(t *testing.T) {
@@ -2638,9 +2629,9 @@ func TestCheckHealthStatus(t *testing.T) {
}
func runHealthCheckTest(t *testing.T, expected *dao.SchedulerHealthDAOInfo) {
- schedulerContext := &scheduler.ClusterContext{}
- schedulerContext.SetLastHealthCheckResult(expected)
- NewWebApp(schedulerContext, nil)
+ testSchedulerContext := &scheduler.ClusterContext{}
+ testSchedulerContext.SetLastHealthCheckResult(expected)
+ NewWebApp(testSchedulerContext, nil)
req, err := http.NewRequest("GET", "/ws/v1/scheduler/healthcheck", strings.NewReader(""))
assert.NilError(t, err, "Error while creating the healthcheck request")
diff --git a/pkg/webservice/webservice.go b/pkg/webservice/webservice.go
index 7554f974..330d11e5 100644
--- a/pkg/webservice/webservice.go
+++ b/pkg/webservice/webservice.go
@@ -62,7 +62,6 @@ func loggingHandler(inner http.Handler, name string) http.HandlerFunc {
}
// StartWebApp starts the web app on the default port.
-// TODO we need the port to be configurable
func (m *WebService) StartWebApp() {
router := newRouter()
m.httpServer = &http.Server{
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]