This is an automated email from the ASF dual-hosted git repository.
chia7712 pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/yunikorn-core.git
The following commit(s) were added to refs/heads/master by this push:
new 3f847b3c [YUNIKORN-2554] Remove "rules" field from PartitionContext (#842)
3f847b3c is described below
commit 3f847b3c71ab7f5bd875c6c4b71adc8ddcf02784
Author: Peter Bacsko <[email protected]>
AuthorDate: Sat Apr 13 10:47:10 2024 +0800
[YUNIKORN-2554] Remove "rules" field from PartitionContext (#842)
Closes: #842
Signed-off-by: Chia-Ping Tsai <[email protected]>
---
pkg/scheduler/partition.go | 5 +----
pkg/scheduler/partition_test.go | 4 ----
2 files changed, 1 insertion(+), 8 deletions(-)
diff --git a/pkg/scheduler/partition.go b/pkg/scheduler/partition.go
index e7a3473e..0ba7db79 100644
--- a/pkg/scheduler/partition.go
+++ b/pkg/scheduler/partition.go
@@ -58,7 +58,6 @@ type PartitionContext struct {
partitionManager *partitionManager // manager for this partition
stateMachine *fsm.FSM // the state of the partition for scheduling
stateTime time.Time // last time the state was updated (needed for cleanup)
- rules *[]configs.PlacementRule // placement rules to be loaded by the scheduler
userGroupCache *security.UserGroupCache // user cache per partition
totalPartitionResource *resources.Resource // Total node resources
allocations int // Number of allocations on the partition
@@ -124,10 +123,9 @@ func (pc *PartitionContext) initialPartitionFromConfig(conf configs.PartitionCon
zap.String("partitionName", pc.Name),
zap.String("rmID", pc.RmID))
- pc.rules = &conf.PlacementRules
// We need to pass in the locked version of the GetQueue function.
// Placing an application will not have a lock on the partition context.
- pc.placementManager = placement.NewPlacementManager(*pc.rules, pc.GetQueue)
+ pc.placementManager = placement.NewPlacementManager(conf.PlacementRules, pc.GetQueue)
// get the user group cache for the partition
// TODO get the resolver from the config
pc.userGroupCache = security.GetUserGroupCache("")
@@ -170,7 +168,6 @@ func (pc *PartitionContext) updatePartitionDetails(conf configs.PartitionConfig)
log.Log(log.SchedPartition).Info("New placement rules not activated, config reload failed", zap.Error(err))
return err
}
- pc.rules = &conf.PlacementRules
pc.updateNodeSortingPolicy(conf)
pc.updatePreemption(conf)
// start at the root: there is only one queue
diff --git a/pkg/scheduler/partition_test.go b/pkg/scheduler/partition_test.go
index 0b296910..3baa40cc 100644
--- a/pkg/scheduler/partition_test.go
+++ b/pkg/scheduler/partition_test.go
@@ -144,7 +144,6 @@ func TestNewWithPlacement(t *testing.T) {
}
partition, err := newPartitionContext(confWith, rmID, nil)
assert.NilError(t, err, "test partition create failed with error")
- assert.Equal(t, len(*partition.rules), 1, "Placement rules not set as expected")
// add a rule and check if it is updated
confWith = configs.PartitionConfig{
@@ -170,7 +169,6 @@ func TestNewWithPlacement(t *testing.T) {
}
err = partition.updatePartitionDetails(confWith)
assert.NilError(t, err, "update partition failed unexpected with error")
- assert.Equal(t, len(*partition.rules), 2, "Placement rules not updated as expected")
// update to turn off placement manager
conf := configs.PartitionConfig{
@@ -186,12 +184,10 @@ func TestNewWithPlacement(t *testing.T) {
}
err = partition.updatePartitionDetails(conf)
assert.NilError(t, err, "update partition failed unexpected with error")
- assert.Equal(t, len(*partition.rules), 0, "Placement rules not updated as expected")
// set the old config back this should turn on the placement again
err = partition.updatePartitionDetails(confWith)
assert.NilError(t, err, "update partition failed unexpected with error")
- assert.Equal(t, len(*partition.rules), 2, "Placement rules not updated as expected")
}
func TestAddNode(t *testing.T) {
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]