This is an automated email from the ASF dual-hosted git repository.

pbacsko pushed a commit to branch branch-1.5
in repository https://gitbox.apache.org/repos/asf/yunikorn-core.git
commit f81193733317ad15f91dea299596c36cbab252da
Author:     Peter Bacsko <pbac...@cloudera.com>
AuthorDate: Sat Apr 13 10:47:10 2024 +0800

    [YUNIKORN-2554] Remove "rules" field from PartitionContext (#842)

    Closes: #842

    Signed-off-by: Chia-Ping Tsai <chia7...@gmail.com>
---
 pkg/scheduler/partition.go      | 5 +----
 pkg/scheduler/partition_test.go | 4 ----
 2 files changed, 1 insertion(+), 8 deletions(-)

diff --git a/pkg/scheduler/partition.go b/pkg/scheduler/partition.go
index ba2cfb03..5774cfab 100644
--- a/pkg/scheduler/partition.go
+++ b/pkg/scheduler/partition.go
@@ -58,7 +58,6 @@ type PartitionContext struct {
 	partitionManager       *partitionManager        // manager for this partition
 	stateMachine           *fsm.FSM                 // the state of the partition for scheduling
 	stateTime              time.Time                // last time the state was updated (needed for cleanup)
-	rules                  *[]configs.PlacementRule // placement rules to be loaded by the scheduler
 	userGroupCache         *security.UserGroupCache // user cache per partition
 	totalPartitionResource *resources.Resource      // Total node resources
 	allocations            int                      // Number of allocations on the partition
@@ -124,10 +123,9 @@ func (pc *PartitionContext) initialPartitionFromConfig(conf configs.PartitionCon
 		zap.String("partitionName", pc.Name),
 		zap.String("rmID", pc.RmID))
 
-	pc.rules = &conf.PlacementRules
 	// We need to pass in the locked version of the GetQueue function.
 	// Placing an application will not have a lock on the partition context.
-	pc.placementManager = placement.NewPlacementManager(*pc.rules, pc.GetQueue)
+	pc.placementManager = placement.NewPlacementManager(conf.PlacementRules, pc.GetQueue)
 	// get the user group cache for the partition
 	// TODO get the resolver from the config
 	pc.userGroupCache = security.GetUserGroupCache("")
@@ -170,7 +168,6 @@ func (pc *PartitionContext) updatePartitionDetails(conf configs.PartitionConfig)
 		log.Log(log.SchedPartition).Info("New placement rules not activated, config reload failed", zap.Error(err))
 		return err
 	}
-	pc.rules = &conf.PlacementRules
 	pc.updateNodeSortingPolicy(conf)
 	pc.updatePreemption(conf)
 	// start at the root: there is only one queue
diff --git a/pkg/scheduler/partition_test.go b/pkg/scheduler/partition_test.go
index 14bef767..802b5354 100644
--- a/pkg/scheduler/partition_test.go
+++ b/pkg/scheduler/partition_test.go
@@ -144,7 +144,6 @@ func TestNewWithPlacement(t *testing.T) {
 	}
 	partition, err := newPartitionContext(confWith, rmID, nil)
 	assert.NilError(t, err, "test partition create failed with error")
-	assert.Equal(t, len(*partition.rules), 1, "Placement rules not set as expected")
 
 	// add a rule and check if it is updated
 	confWith = configs.PartitionConfig{
@@ -170,7 +169,6 @@ func TestNewWithPlacement(t *testing.T) {
 	}
 	err = partition.updatePartitionDetails(confWith)
 	assert.NilError(t, err, "update partition failed unexpected with error")
-	assert.Equal(t, len(*partition.rules), 2, "Placement rules not updated as expected")
 
 	// update to turn off placement manager
 	conf := configs.PartitionConfig{
@@ -186,12 +184,10 @@ func TestNewWithPlacement(t *testing.T) {
 	}
 	err = partition.updatePartitionDetails(conf)
 	assert.NilError(t, err, "update partition failed unexpected with error")
-	assert.Equal(t, len(*partition.rules), 0, "Placement rules not updated as expected")
 
 	// set the old config back this should turn on the placement again
 	err = partition.updatePartitionDetails(confWith)
 	assert.NilError(t, err, "update partition failed unexpected with error")
-	assert.Equal(t, len(*partition.rules), 2, "Placement rules not updated as expected")
 }
 
 func TestAddNode(t *testing.T) {
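The substance of the patch: PartitionContext kept a *[]configs.PlacementRule field that was written on every config load and reload but consumed only when constructing the placement manager, so the rules can be handed straight from the PartitionConfig to NewPlacementManager and the field (plus the four test assertions that peeked at it) can be dropped. A minimal Go sketch of the resulting shape; the types below are simplified stand-ins for illustration only, not the real yunikorn-core definitions:

package main

import "fmt"

// Simplified stand-ins for the real yunikorn-core types; they only
// exist to show the shape of the refactoring, not its details.
type PlacementRule struct{ Name string }

type PartitionConfig struct {
	PlacementRules []PlacementRule
}

type Queue struct{ name string }

type PlacementManager struct {
	rules    []PlacementRule
	getQueue func(string) *Queue
}

func NewPlacementManager(rules []PlacementRule, getQueue func(string) *Queue) *PlacementManager {
	return &PlacementManager{rules: rules, getQueue: getQueue}
}

// After the patch the partition no longer caches its own rules slice:
// the placement manager is the single owner of that state.
type PartitionContext struct {
	placementManager *PlacementManager
}

func (pc *PartitionContext) initFromConfig(conf PartitionConfig) {
	// Rules flow straight from the config into the manager; there is no
	// intermediate field that could drift out of sync on a config reload.
	pc.placementManager = NewPlacementManager(conf.PlacementRules, func(name string) *Queue {
		return &Queue{name: name}
	})
}

func main() {
	pc := &PartitionContext{}
	pc.initFromConfig(PartitionConfig{PlacementRules: []PlacementRule{{Name: "provided"}}})
	fmt.Println(len(pc.placementManager.rules)) // prints 1
}

The design point is ownership: with the cached field gone there is a single copy of the rules, held by the placement manager, and nothing for updatePartitionDetails to keep in sync when the configuration is reloaded.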
---------------------------------------------------------------------
To unsubscribe, e-mail: issues-unsubscr...@yunikorn.apache.org
For additional commands, e-mail: issues-h...@yunikorn.apache.org