This is an automated email from the ASF dual-hosted git repository.

ccondit pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/yunikorn-core.git


The following commit(s) were added to refs/heads/master by this push:
     new ee8d7fa6 [YUNIKORN-2907] Avoid extraneous logging during queue config processing (#1009)
ee8d7fa6 is described below

commit ee8d7fa6999a9ee3a3e9013aa9bddcd63ed35a2a
Author: Michael <[email protected]>
AuthorDate: Thu Feb 20 11:15:04 2025 -0600

    [YUNIKORN-2907] Avoid extraneous logging during queue config processing (#1009)
    
    Closes: #1009
    
    Signed-off-by: Craig Condit <[email protected]>
---
 pkg/common/security/acl.go                | 30 ++++++++++++-------
 pkg/common/security/acl_test.go           |  6 ++--
 pkg/scheduler/context.go                  |  4 +--
 pkg/scheduler/context_test.go             |  2 +-
 pkg/scheduler/objects/queue.go            | 29 ++++++++++--------
 pkg/scheduler/objects/queue_test.go       | 25 ++++++++++++----
 pkg/scheduler/objects/utilities_test.go   |  2 +-
 pkg/scheduler/partition.go                | 49 ++++++++++++++++++-------------
 pkg/scheduler/partition_manager_test.go   |  2 +-
 pkg/scheduler/partition_test.go           | 26 ++++++++--------
 pkg/scheduler/placement/placement.go      | 28 +++++++++++-------
 pkg/scheduler/placement/placement_test.go | 34 ++++++++++-----------
 pkg/scheduler/placement/testrule_test.go  |  4 +--
 pkg/scheduler/utilities_test.go           | 12 ++++----
 14 files changed, 148 insertions(+), 105 deletions(-)
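
For readers skimming the diff below: the patch threads a new boolean "silence" argument from partition creation down through queue, ACL and placement-rule construction, so that the validation-only pass done while reloading a configuration no longer produces info logs or queue events, while first-time creation keeps the old behaviour. A minimal, self-contained sketch of the pattern (not code from this patch; the function, names and log text here are illustrative only):

    package main

    import "log"

    // setUsers mirrors the shape of the changed functions below: the caller
    // decides whether informational logging is wanted (silence=false) or
    // suppressed (silence=true, e.g. for a validation-only config pass).
    func setUsers(userList []string, silence bool) map[string]bool {
            users := make(map[string]bool, len(userList))
            for _, user := range userList {
                    if user == "" {
                            if !silence {
                                    log.Printf("ignoring empty user in ACL definition")
                            }
                            continue
                    }
                    users[user] = true
            }
            return users
    }

    func main() {
            setUsers([]string{"sue", "", "bob"}, true)  // silent: no log output
            setUsers([]string{"sue", "", "bob"}, false) // normal: logs the ignored entry
    }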

diff --git a/pkg/common/security/acl.go b/pkg/common/security/acl.go
index 8ea8ddfc..a864cbb0 100644
--- a/pkg/common/security/acl.go
+++ b/pkg/common/security/acl.go
@@ -46,12 +46,15 @@ func (a *ACL) setAllAllowed(part string) {
        a.allAllowed = part == common.Wildcard
 }
 
-// set the user list in the ACL, invalid user names are ignored
-func (a *ACL) setUsers(userList []string) {
+// set the user list in the ACL, invalid user names are ignored.
+// If the silence flag is set to true, the function will not log when setting the users.
+func (a *ACL) setUsers(userList []string, silence bool) {
        a.users = make(map[string]bool)
        // special case if the user list is just the wildcard
        if len(userList) == 1 && userList[0] == common.Wildcard {
-               log.Log(log.Security).Info("user list is wildcard, allowing all access")
+               if !silence {
+                       log.Log(log.Security).Info("user list is wildcard, allowing all access")
+               }
                a.allAllowed = true
                return
        }
@@ -64,7 +67,7 @@ func (a *ACL) setUsers(userList []string) {
                // check the users validity
                if userNameRegExp.MatchString(user) {
                        a.users[user] = true
-               } else {
+               } else if !silence {
                        log.Log(log.Security).Info("ignoring user in ACL 
definition",
                                zap.String("user", user))
                }
@@ -72,15 +75,20 @@ func (a *ACL) setUsers(userList []string) {
 }
 
 // set the group list in the ACL, invalid group names are ignored
-func (a *ACL) setGroups(groupList []string) {
+// If the silence flag is set to true, the function will not log when setting the groups.
+func (a *ACL) setGroups(groupList []string, silence bool) {
        a.groups = make(map[string]bool)
        // special case if the wildcard was already set
        if a.allAllowed {
-               log.Log(log.Security).Info("ignoring group list in ACL: wildcard set")
+               if !silence {
+                       log.Log(log.Security).Info("ignoring group list in ACL: wildcard set")
+               }
                return
        }
        if len(groupList) == 1 && groupList[0] == common.Wildcard {
-               log.Log(log.Security).Info("group list is wildcard, allowing all access")
+               if !silence {
+                       log.Log(log.Security).Info("group list is wildcard, allowing all access")
+               }
                a.users = make(map[string]bool)
                a.allAllowed = true
                return
@@ -94,7 +102,7 @@ func (a *ACL) setGroups(groupList []string) {
                // check the group validity
                if groupRegExp.MatchString(group) {
                        a.groups[group] = true
-               } else {
+               } else if !silence {
                        log.Log(log.Security).Info("ignoring group in ACL",
                                zap.String("group", group))
                }
@@ -102,7 +110,7 @@ func (a *ACL) setGroups(groupList []string) {
 }
 
 // create a new ACL from scratch
-func NewACL(aclStr string) (ACL, error) {
+func NewACL(aclStr string, silence bool) (ACL, error) {
        acl := ACL{}
        if aclStr == "" {
                return acl, nil
@@ -116,9 +124,9 @@ func NewACL(aclStr string) (ACL, error) {
        // trim and check for wildcard
        acl.setAllAllowed(aclStr)
        // parse users and groups
-       acl.setUsers(strings.Split(fields[0], common.Separator))
+       acl.setUsers(strings.Split(fields[0], common.Separator), silence)
        if len(fields) == 2 {
-               acl.setGroups(strings.Split(fields[1], common.Separator))
+               acl.setGroups(strings.Split(fields[1], common.Separator), silence)
        }
        return acl, nil
 }
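
As a usage note on the new signature: NewACL keeps its old behaviour when silence is false; passing true is intended for callers that only want to parse and validate an ACL string. A small, hypothetical example (the ACL string and names are made up):

    package main

    import (
            "fmt"

            "github.com/apache/yunikorn-core/pkg/common/security"
    )

    func main() {
            // "users groups": comma-separated users, then comma-separated groups.
            // silence=true suppresses the wildcard / "ignoring user" info logs.
            acl, err := security.NewACL("sue,bob developers", true)
            if err != nil {
                    fmt.Println("invalid ACL:", err)
                    return
            }
            fmt.Printf("parsed ACL: %+v\n", acl)
    }
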
diff --git a/pkg/common/security/acl_test.go b/pkg/common/security/acl_test.go
index 9ea2f547..c093d678 100644
--- a/pkg/common/security/acl_test.go
+++ b/pkg/common/security/acl_test.go
@@ -155,7 +155,7 @@ func TestACLCreate(t *testing.T) {
        }
        for _, tt := range tests {
                t.Run(tt.input, func(t *testing.T) {
-                       got, err := NewACL(tt.input)
+                       got, err := NewACL(tt.input, false)
                        if err != nil {
                                t.Errorf("parsing failed for string: %s", 
tt.input)
                        }
@@ -177,7 +177,7 @@ func TestNewACLErrorCase(t *testing.T) {
        }
        for _, tt := range tests {
                t.Run(tt.caseName, func(t *testing.T) {
-                       if _, err := NewACL(tt.acl); err == nil {
+                       if _, err := NewACL(tt.acl, false); err == nil {
                                t.Errorf("parsing %s string should be failed", 
tt.acl)
                        }
                })
@@ -238,7 +238,7 @@ func TestACLAccess(t *testing.T) {
        }
        for _, tt := range tests {
                t.Run(fmt.Sprintf("vistor %v, acl %s", tt.visitor, tt.acl), 
func(t *testing.T) {
-                       acl, err := NewACL(tt.acl)
+                       acl, err := NewACL(tt.acl, false)
                        if err != nil {
                                t.Error("the number of space should not be more 
than 2 because the number of categories only include users and groups")
                        }
diff --git a/pkg/scheduler/context.go b/pkg/scheduler/context.go
index ca2aebe4..b5b91c2a 100644
--- a/pkg/scheduler/context.go
+++ b/pkg/scheduler/context.go
@@ -365,7 +365,7 @@ func (cc *ClusterContext) updateSchedulerConfig(conf *configs.SchedulerConfig, r
                part, ok := cc.partitions[p.Name]
                if ok {
                        // make sure the new info passes all checks
-                       _, err = newPartitionContext(p, rmID, nil)
+                       _, err = newPartitionContext(p, rmID, nil, true)
                        if err != nil {
                                return err
                        }
@@ -379,7 +379,7 @@ func (cc *ClusterContext) updateSchedulerConfig(conf *configs.SchedulerConfig, r
                        // not found: new partition, no checks needed
                        log.Log(log.SchedContext).Info("added partitions", 
zap.String("partitionName", partitionName))
 
-                       part, err = newPartitionContext(p, rmID, cc)
+                       part, err = newPartitionContext(p, rmID, cc, false)
                        if err != nil {
                                return err
                        }
diff --git a/pkg/scheduler/context_test.go b/pkg/scheduler/context_test.go
index 854623e4..04287f31 100644
--- a/pkg/scheduler/context_test.go
+++ b/pkg/scheduler/context_test.go
@@ -86,7 +86,7 @@ func createTestContext(t *testing.T, partitionName string) *ClusterContext {
                        },
                },
        }
-       partition, err := newPartitionContext(conf, "test", context)
+       partition, err := newPartitionContext(conf, "test", context, false)
        assert.NilError(t, err, "partition create should not have failed with 
error")
        context.partitions[partition.Name] = partition
        return context
diff --git a/pkg/scheduler/objects/queue.go b/pkg/scheduler/objects/queue.go
index 5252ba70..b111d338 100644
--- a/pkg/scheduler/objects/queue.go
+++ b/pkg/scheduler/objects/queue.go
@@ -114,8 +114,9 @@ func newBlankQueue() *Queue {
 }
 
 // NewConfiguredQueue creates a new queue from scratch based on the configuration
-// lock free as it cannot be referenced yet
-func NewConfiguredQueue(conf configs.QueueConfig, parent *Queue) (*Queue, error) {
+// lock free as it cannot be referenced yet.
+// If the silence flag is set to true, the function will neither log the queue creation nor send a queue event.
+func NewConfiguredQueue(conf configs.QueueConfig, parent *Queue, silence bool) (*Queue, error) {
        sq := newBlankQueue()
        sq.Name = strings.ToLower(conf.Name)
        sq.QueuePath = strings.ToLower(conf.Name)
@@ -128,7 +129,7 @@ func NewConfiguredQueue(conf configs.QueueConfig, parent *Queue) (*Queue, error)
        sq.updateMaxRunningAppsMetrics()
 
        // update the properties
-       if err := sq.applyConf(conf); err != nil {
+       if err := sq.applyConf(conf, silence); err != nil {
                return nil, errors.Join(errors.New("configured queue creation failed: "), err)
        }
 
@@ -145,10 +146,13 @@ func NewConfiguredQueue(conf configs.QueueConfig, parent *Queue) (*Queue, error)
        } else {
                sq.UpdateQueueProperties()
        }
-       sq.queueEvents = schedEvt.NewQueueEvents(events.GetEventSystem())
-       log.Log(log.SchedQueue).Info("configured queue added to scheduler",
-               zap.String("queueName", sq.QueuePath))
-       sq.queueEvents.SendNewQueueEvent(sq.QueuePath, sq.isManaged)
+
+       if !silence {
+               sq.queueEvents = schedEvt.NewQueueEvents(events.GetEventSystem())
+               log.Log(log.SchedQueue).Info("configured queue added to scheduler",
+                       zap.String("queueName", sq.QueuePath))
+               sq.queueEvents.SendNewQueueEvent(sq.QueuePath, sq.isManaged)
+       }
 
        return sq, nil
 }
@@ -310,21 +314,22 @@ func filterParentProperty(key string, value string) string {
 func (sq *Queue) ApplyConf(conf configs.QueueConfig) error {
        sq.Lock()
        defer sq.Unlock()
-       return sq.applyConf(conf)
+       return sq.applyConf(conf, false)
 }
 
 // applyConf applies all the properties to the queue from the config.
-// lock free call, must be called holding the queue lock or during create only
-func (sq *Queue) applyConf(conf configs.QueueConfig) error {
+// lock free call, must be called holding the queue lock or during create only.
+// If the silence flag is set to true, the function will not log when setting users and groups.
+func (sq *Queue) applyConf(conf configs.QueueConfig, silence bool) error {
        // Set the ACLs
        var err error
-       sq.submitACL, err = security.NewACL(conf.SubmitACL)
+       sq.submitACL, err = security.NewACL(conf.SubmitACL, silence)
        if err != nil {
                log.Log(log.SchedQueue).Error("parsing submit ACL failed this 
should not happen",
                        zap.Error(err))
                return err
        }
-       sq.adminACL, err = security.NewACL(conf.AdminACL)
+       sq.adminACL, err = security.NewACL(conf.AdminACL, silence)
        if err != nil {
                log.Log(log.SchedQueue).Error("parsing admin ACL failed this 
should not happen",
                        zap.Error(err))
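
The same flag is available on queue creation, which makes it possible to build a queue from a configuration purely as a validation step, without the "configured queue added to scheduler" log line or a queue event. A hedged, illustrative sketch (the queue config is made up and only uses fields referenced elsewhere in this diff; it assumes the surrounding scheduler plumbing is available as in the tests below):

    package main

    import (
            "fmt"

            "github.com/apache/yunikorn-core/pkg/common/configs"
            "github.com/apache/yunikorn-core/pkg/scheduler/objects"
    )

    func main() {
            conf := configs.QueueConfig{
                    Name:      "root",
                    SubmitACL: "*",
            }
            // silence=true: no log line, no queue event; errors still surface.
            root, err := objects.NewConfiguredQueue(conf, nil, true)
            if err != nil {
                    fmt.Println("queue config rejected:", err)
                    return
            }
            fmt.Println("validated queue:", root.QueuePath)
    }
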
diff --git a/pkg/scheduler/objects/queue_test.go b/pkg/scheduler/objects/queue_test.go
index edfaf985..c0d0b16d 100644
--- a/pkg/scheduler/objects/queue_test.go
+++ b/pkg/scheduler/objects/queue_test.go
@@ -2267,7 +2267,7 @@ func TestNewConfiguredQueue(t *testing.T) {
                        },
                },
        }
-       parent, err := NewConfiguredQueue(parentConfig, nil)
+       parent, err := NewConfiguredQueue(parentConfig, nil, false)
        assert.NilError(t, err, "failed to create queue: %v", err)
        assert.Equal(t, parent.Name, "parent_queue")
        assert.Equal(t, parent.QueuePath, "parent_queue")
@@ -2287,7 +2287,7 @@ func TestNewConfiguredQueue(t *testing.T) {
                        Guaranteed: getResourceConf(),
                },
        }
-       childLeaf, err := NewConfiguredQueue(leafConfig, parent)
+       childLeaf, err := NewConfiguredQueue(leafConfig, parent, false)
        assert.NilError(t, err, "failed to create queue: %v", err)
        assert.Equal(t, childLeaf.QueuePath, "parent_queue.leaf_queue")
        assert.Assert(t, childLeaf.template == nil)
@@ -2304,13 +2304,26 @@ func TestNewConfiguredQueue(t *testing.T) {
                Name:   "nonleaf_queue",
                Parent: true,
        }
-       childNonLeaf, err := NewConfiguredQueue(NonLeafConfig, parent)
+       childNonLeaf, err := NewConfiguredQueue(NonLeafConfig, parent, false)
        assert.NilError(t, err, "failed to create queue: %v", err)
        assert.Equal(t, childNonLeaf.QueuePath, "parent_queue.nonleaf_queue")
        assert.Assert(t, reflect.DeepEqual(childNonLeaf.template, parent.template))
        assert.Equal(t, len(childNonLeaf.properties), 0)
        assert.Assert(t, childNonLeaf.guaranteedResource == nil)
        assert.Assert(t, childNonLeaf.maxResource == nil)
+
+       // case 2: do not send queue event when silence flag is set to true
+       events.Init()
+       eventSystem := events.GetEventSystem().(*events.EventSystemImpl) //nolint:errcheck
+       eventSystem.StartServiceWithPublisher(false)
+       rootConfig := configs.QueueConfig{
+               Name: "root",
+       }
+       _, err = NewConfiguredQueue(rootConfig, nil, true)
+       assert.NilError(t, err, "failed to create queue: %v", err)
+       time.Sleep(time.Second)
+       noEvents := eventSystem.Store.CountStoredEvents()
+       assert.Equal(t, noEvents, uint64(0), "expected 0 event, got %d", noEvents)
 }
 
 func TestResetRunningState(t *testing.T) {
@@ -2333,11 +2346,11 @@ func TestResetRunningState(t *testing.T) {
        parent.MarkQueueForRemoval()
        assert.Assert(t, parent.IsDraining(), "parent should be marked as draining")
        assert.Assert(t, leaf.IsDraining(), "leaf should be marked as draining")
-       err = parent.applyConf(emptyConf)
+       err = parent.applyConf(emptyConf, false)
        assert.NilError(t, err, "failed to update parent")
        assert.Assert(t, parent.IsRunning(), "parent should be running again")
        assert.Assert(t, leaf.IsDraining(), "leaf should still be marked as draining")
-       err = leaf.applyConf(emptyConf)
+       err = leaf.applyConf(emptyConf, false)
        assert.NilError(t, err, "failed to update leaf")
        assert.Assert(t, leaf.IsRunning(), "leaf should be running again")
 }
@@ -2360,7 +2373,7 @@ func TestNewRecoveryQueue(t *testing.T) {
                Properties:    map[string]string{configs.ApplicationSortPolicy: "fair"},
                ChildTemplate: configs.ChildTemplate{Properties: map[string]string{configs.ApplicationSortPolicy: "fair"}},
        }
-       parent, err = NewConfiguredQueue(parentConfig, nil)
+       parent, err = NewConfiguredQueue(parentConfig, nil, false)
        assert.NilError(t, err, "failed to create queue: %v", err)
        recoveryQueue, err := NewRecoveryQueue(parent)
        assert.NilError(t, err, "failed to create recovery queue: %v", err)
diff --git a/pkg/scheduler/objects/utilities_test.go b/pkg/scheduler/objects/utilities_test.go
index f9b687b1..6d91a0b3 100644
--- a/pkg/scheduler/objects/utilities_test.go
+++ b/pkg/scheduler/objects/utilities_test.go
@@ -97,7 +97,7 @@ func createManagedQueuePropsMaxApps(parentSQ *Queue, name string, parent bool, m
                        Guaranteed: guarRes,
                }
        }
-       queue, err := NewConfiguredQueue(queueConfig, parentSQ)
+       queue, err := NewConfiguredQueue(queueConfig, parentSQ, false)
        if err != nil {
                return nil, err
        }
diff --git a/pkg/scheduler/partition.go b/pkg/scheduler/partition.go
index 1375d185..f8d34a70 100644
--- a/pkg/scheduler/partition.go
+++ b/pkg/scheduler/partition.go
@@ -79,7 +79,7 @@ type PartitionContext struct {
        locking.RWMutex
 }
 
-func newPartitionContext(conf configs.PartitionConfig, rmID string, cc *ClusterContext) (*PartitionContext, error) {
+func newPartitionContext(conf configs.PartitionConfig, rmID string, cc *ClusterContext, silence bool) (*PartitionContext, error) {
        if conf.Name == "" || rmID == "" {
                log.Log(log.SchedPartition).Info("partition cannot be created",
                        zap.String("partition name", conf.Name),
@@ -98,15 +98,16 @@ func newPartitionContext(conf configs.PartitionConfig, rmID string, cc *ClusterC
                foreignAllocs:         make(map[string]*objects.Allocation),
        }
        pc.partitionManager = newPartitionManager(pc, cc)
-       if err := pc.initialPartitionFromConfig(conf); err != nil {
+       if err := pc.initialPartitionFromConfig(conf, silence); err != nil {
                return nil, err
        }
 
        return pc, nil
 }
 
-// Initialise the partition
-func (pc *PartitionContext) initialPartitionFromConfig(conf configs.PartitionConfig) error {
+// Initialise the partition.
+// If the silence flag is set to true, the function will not log queue creation or node sorting policy, update limit settings, or send a queue event.
+func (pc *PartitionContext) initialPartitionFromConfig(conf configs.PartitionConfig, silence bool) error {
        if len(conf.Queues) == 0 || conf.Queues[0].Name != configs.RootQueue {
                return fmt.Errorf("partition cannot be created without root 
queue")
        }
@@ -115,38 +116,45 @@ func (pc *PartitionContext) initialPartitionFromConfig(conf configs.PartitionCon
        // Add the rest of the queue structure recursively
        queueConf := conf.Queues[0]
        var err error
-       if pc.root, err = objects.NewConfiguredQueue(queueConf, nil); err != nil {
+       if pc.root, err = objects.NewConfiguredQueue(queueConf, nil, silence); err != nil {
                return err
        }
        // recursively add the queues to the root
-       if err = pc.addQueue(queueConf.Queues, pc.root); err != nil {
+       if err = pc.addQueue(queueConf.Queues, pc.root, silence); err != nil {
                return err
        }
-       log.Log(log.SchedPartition).Info("root queue added",
-               zap.String("partitionName", pc.Name),
-               zap.String("rmID", pc.RmID))
+
+       if !silence {
+               log.Log(log.SchedPartition).Info("root queue added",
+                       zap.String("partitionName", pc.Name),
+                       zap.String("rmID", pc.RmID))
+       }
 
        // We need to pass in the locked version of the GetQueue function.
        // Placing an application will not have a lock on the partition context.
-       pc.placementManager = placement.NewPlacementManager(conf.PlacementRules, pc.GetQueue)
+       pc.placementManager = placement.NewPlacementManager(conf.PlacementRules, pc.GetQueue, silence)
        // get the user group cache for the partition
        pc.userGroupCache = security.GetUserGroupCache("")
-       pc.updateNodeSortingPolicy(conf)
+       pc.updateNodeSortingPolicy(conf, silence)
        pc.updatePreemption(conf)
 
        // update limit settings: start at the root
-       return ugm.GetUserManager().UpdateConfig(queueConf, conf.Queues[0].Name)
+       if !silence {
+               return ugm.GetUserManager().UpdateConfig(queueConf, conf.Queues[0].Name)
+       }
+       return nil
 }
 
 // NOTE: this is a lock free call. It should only be called holding the PartitionContext lock.
-func (pc *PartitionContext) updateNodeSortingPolicy(conf configs.PartitionConfig) {
+// If the silence flag is set to true, the function will not log when setting the node sorting policy.
+func (pc *PartitionContext) updateNodeSortingPolicy(conf configs.PartitionConfig, silence bool) {
        var configuredPolicy policies.SortingPolicy
        configuredPolicy, err := policies.SortingPolicyFromString(conf.NodeSortPolicy.Type)
        if err != nil {
                log.Log(log.SchedPartition).Debug("NodeSorting policy incorrectly set or unknown",
                        zap.Error(err))
                log.Log(log.SchedPartition).Info(fmt.Sprintf("NodeSorting policy not set using '%s' as default", configuredPolicy))
-       } else {
+       } else if !silence {
                log.Log(log.SchedPartition).Info("NodeSorting policy set from 
config",
                        zap.Stringer("policyName", configuredPolicy))
        }
@@ -170,7 +178,7 @@ func (pc *PartitionContext) updatePartitionDetails(conf configs.PartitionConfig)
                log.Log(log.SchedPartition).Info("New placement rules not 
activated, config reload failed", zap.Error(err))
                return err
        }
-       pc.updateNodeSortingPolicy(conf)
+       pc.updateNodeSortingPolicy(conf, false)
 
        pc.Lock()
        defer pc.Unlock()
@@ -191,17 +199,18 @@ func (pc *PartitionContext) updatePartitionDetails(conf configs.PartitionConfig)
        return ugm.GetUserManager().UpdateConfig(queueConf, conf.Queues[0].Name)
 }
 
-// Process the config structure and create a queue info tree for this partition
-func (pc *PartitionContext) addQueue(conf []configs.QueueConfig, parent *objects.Queue) error {
+// Process the config structure and create a queue info tree for this partition.
+// If the silence flag is set to true, the function will neither log the queue creation nor send a queue event.
+func (pc *PartitionContext) addQueue(conf []configs.QueueConfig, parent *objects.Queue, silence bool) error {
        // create the queue at this level
        for _, queueConf := range conf {
-               thisQueue, err := objects.NewConfiguredQueue(queueConf, parent)
+               thisQueue, err := objects.NewConfiguredQueue(queueConf, parent, silence)
                if err != nil {
                        return err
                }
                // recursive create the queues below
                if len(queueConf.Queues) > 0 {
-                       err = pc.addQueue(queueConf.Queues, thisQueue)
+                       err = pc.addQueue(queueConf.Queues, thisQueue, silence)
                        if err != nil {
                                return err
                        }
@@ -224,7 +233,7 @@ func (pc *PartitionContext) updateQueues(config []configs.QueueConfig, parent *o
                queue := pc.getQueueInternal(pathName)
                var err error
                if queue == nil {
-                       queue, err = objects.NewConfiguredQueue(queueConfig, parent)
+                       queue, err = objects.NewConfiguredQueue(queueConfig, parent, false)
                } else {
                        err = queue.ApplyConf(queueConfig)
                }
diff --git a/pkg/scheduler/partition_manager_test.go b/pkg/scheduler/partition_manager_test.go
index 5c949a9f..08cecf40 100644
--- a/pkg/scheduler/partition_manager_test.go
+++ b/pkg/scheduler/partition_manager_test.go
@@ -41,7 +41,7 @@ func createPartitionContext(t *testing.T) *PartitionContext {
                },
        }
        cc := &ClusterContext{}
-       partition, err := newPartitionContext(conf, "test", cc)
+       partition, err := newPartitionContext(conf, "test", cc, false)
        assert.NilError(t, err)
        return partition
 }
diff --git a/pkg/scheduler/partition_test.go b/pkg/scheduler/partition_test.go
index 673c3948..4fac0f49 100644
--- a/pkg/scheduler/partition_test.go
+++ b/pkg/scheduler/partition_test.go
@@ -60,16 +60,16 @@ func setupNode(t *testing.T, nodeID string, partition *PartitionContext, nodeRes
 }
 
 func TestNewPartition(t *testing.T) {
-       partition, err := newPartitionContext(configs.PartitionConfig{}, "", nil)
+       partition, err := newPartitionContext(configs.PartitionConfig{}, "", nil, false)
        if err == nil || partition != nil {
                t.Fatal("nil inputs should not have returned partition")
        }
        conf := configs.PartitionConfig{Name: "test"}
-       partition, err = newPartitionContext(conf, "", nil)
+       partition, err = newPartitionContext(conf, "", nil, false)
        if err == nil || partition != nil {
                t.Fatal("named partition without RM should not have returned 
partition")
        }
-       partition, err = newPartitionContext(conf, "test", &ClusterContext{})
+       partition, err = newPartitionContext(conf, "test", &ClusterContext{}, false)
        if err == nil || partition != nil {
                t.Fatal("partition without root queue should not have returned 
partition")
        }
@@ -83,7 +83,7 @@ func TestNewPartition(t *testing.T) {
                        },
                },
        }
-       partition, err = newPartitionContext(conf, "test", &ClusterContext{})
+       partition, err = newPartitionContext(conf, "test", &ClusterContext{}, false)
        if err == nil || partition != nil {
                t.Fatal("partition without root queue should not have returned 
partition")
        }
@@ -115,7 +115,7 @@ func TestNewPartition(t *testing.T) {
                        },
                },
        }
-       partition, err = newPartitionContext(conf, "test", &ClusterContext{})
+       partition, err = newPartitionContext(conf, "test", &ClusterContext{}, false)
        assert.NilError(t, err, "partition create should not have failed with 
error")
        if partition.root.QueuePath != "root" {
                t.Fatal("partition root queue not set as expected")
@@ -142,7 +142,7 @@ func TestNewWithPlacement(t *testing.T) {
                Limits:         nil,
                NodeSortPolicy: configs.NodeSortingPolicy{},
        }
-       partition, err := newPartitionContext(confWith, rmID, nil)
+       partition, err := newPartitionContext(confWith, rmID, nil, false)
        assert.NilError(t, err, "test partition create failed with error")
 
        // add a rule and check if it is updated
@@ -1004,7 +1004,7 @@ func TestAddAppForcedWithPlacement(t *testing.T) {
                Limits:         nil,
                NodeSortPolicy: configs.NodeSortingPolicy{},
        }
-       partition, err := newPartitionContext(confWith, rmID, nil)
+       partition, err := newPartitionContext(confWith, rmID, nil, false)
        assert.NilError(t, err, "test partition create failed with error")
 
        // add a new app using tag rule
@@ -1314,7 +1314,7 @@ func TestCreateDeepQueueConfig(t *testing.T) {
        if root == nil {
                t.Error("root queue not found in partition")
        }
-       err = partition.addQueue(conf, root)
+       err = partition.addQueue(conf, root, false)
        assert.NilError(t, err, "'root.level1.level2.level3.level4.level5' 
queue creation from config failed")
        queue := partition.GetQueue("root.level1.level2.level3.level4.level5")
        if queue == nil {
@@ -1530,7 +1530,7 @@ func TestGetQueue(t *testing.T) {
        }
        var parent *objects.Queue
        // manually add the queue in below the root
-       parent, err = objects.NewConfiguredQueue(parentConf, queue)
+       parent, err = objects.NewConfiguredQueue(parentConf, queue, false)
        assert.NilError(t, err, "failed to create parent queue")
        queue = partition.GetQueue("root.unknown")
        assert.Equal(t, queue, nilQueue, "partition returned not nil for non existing queue name request: %v", queue)
@@ -3864,7 +3864,7 @@ func TestUpdateNodeSortingPolicy(t *testing.T) {
                                PlacementRules: nil,
                                Limits:         nil,
                                NodeSortPolicy: configs.NodeSortingPolicy{Type: tt.input},
-                       })
+                       }, false)
 
                        ans := partition.nodes.GetNodeSortingPolicy().PolicyType().String()
                        if ans != tt.want {
@@ -3911,7 +3911,7 @@ func TestGetNodeSortingPolicyWhenNewPartitionFromConfig(t *testing.T) {
                                },
                        }
 
-                       p, err := newPartitionContext(conf, rmID, nil)
+                       p, err := newPartitionContext(conf, rmID, nil, false)
                        if err != nil {
                                t.Errorf("Partition creation fail: %s", 
err.Error())
                        }
@@ -4401,7 +4401,7 @@ func TestLimitMaxApplications(t *testing.T) {
                                NodeSortPolicy: configs.NodeSortingPolicy{},
                        }
 
-                       partition, err := newPartitionContext(conf, rmID, nil)
+                       partition, err := newPartitionContext(conf, rmID, nil, false)
                        assert.NilError(t, err, "partition create failed")
 
                        // add node1
@@ -4556,7 +4556,7 @@ func TestLimitMaxApplicationsForReservedAllocation(t *testing.T) {
                                NodeSortPolicy: configs.NodeSortingPolicy{},
                        }
 
-                       partition, err := newPartitionContext(conf, rmID, nil)
+                       partition, err := newPartitionContext(conf, rmID, nil, false)
                        assert.NilError(t, err, "partition create failed")
 
                        // add node1
diff --git a/pkg/scheduler/placement/placement.go b/pkg/scheduler/placement/placement.go
index 10e3e663..31b6f406 100644
--- a/pkg/scheduler/placement/placement.go
+++ b/pkg/scheduler/placement/placement.go
@@ -43,11 +43,11 @@ type AppPlacementManager struct {
        locking.RWMutex
 }
 
-func NewPlacementManager(rules []configs.PlacementRule, queueFunc func(string) *objects.Queue) *AppPlacementManager {
+func NewPlacementManager(rules []configs.PlacementRule, queueFunc func(string) *objects.Queue, silence bool) *AppPlacementManager {
        m := &AppPlacementManager{
                queueFn: queueFunc,
        }
-       if err := m.initialise(rules); err != nil {
+       if err := m.initialise(rules, silence); err != nil {
                log.Log(log.Config).Error("Placement manager created without 
rules: not active", zap.Error(err))
        }
        return m
@@ -66,7 +66,7 @@ func (m *AppPlacementManager) GetRulesDAO() []*dao.RuleDAO {
 
 // UpdateRules sets the rules for an active placement manager
 func (m *AppPlacementManager) UpdateRules(rules []configs.PlacementRule) error 
{
-       if err := m.initialise(rules); err != nil {
+       if err := m.initialise(rules, false); err != nil {
                log.Log(log.Config).Info("Placement manager rules not 
reloaded", zap.Error(err))
                return err
        }
@@ -74,17 +74,22 @@ func (m *AppPlacementManager) UpdateRules(rules []configs.PlacementRule) error {
 }
 
 // initialise the rules from a parsed config.
-func (m *AppPlacementManager) initialise(rules []configs.PlacementRule) error {
-       log.Log(log.Config).Info("Building new rule list for placement manager")
+// If the silence flag is set to true, the function will not log.
+func (m *AppPlacementManager) initialise(rules []configs.PlacementRule, silence bool) error {
+       if !silence {
+               log.Log(log.Config).Info("Building new rule list for placement manager")
+       }
        // build temp list from new config
-       tempRules, err := buildRules(rules)
+       tempRules, err := buildRules(rules, silence)
        if err != nil {
                return err
        }
        m.Lock()
        defer m.Unlock()
 
-       log.Log(log.Config).Info("Activated rule set in placement manager")
+       if !silence {
+               log.Log(log.Config).Info("Activated rule set in placement manager")
+       }
        m.rules = tempRules
        // all done manager is initialised
        for rule := range m.rules {
@@ -203,11 +208,14 @@ func (m *AppPlacementManager) PlaceApplication(app *objects.Application) error {
 
 // buildRules builds a new rule set based on the config.
 // If the rule set is correct and can be used the new set is returned.
-// If any error is encountered a nil array is returned and the error set
-func buildRules(rules []configs.PlacementRule) ([]rule, error) {
+// If any error is encountered a nil array is returned and the error set.
+// If the silence flag is set to true, the function will not log.
+func buildRules(rules []configs.PlacementRule, silence bool) ([]rule, error) {
        // empty list should result in a single "provided" rule
        if len(rules) == 0 {
-               log.Log(log.Config).Info("Placement manager configured without rules: using implicit provided rule")
+               if !silence {
+                       log.Log(log.Config).Info("Placement manager configured without rules: using implicit provided rule")
+               }
                rules = []configs.PlacementRule{{
                        Name:   types.Provided,
                        Create: false,
diff --git a/pkg/scheduler/placement/placement_test.go b/pkg/scheduler/placement/placement_test.go
index 9ea73a78..add7def8 100644
--- a/pkg/scheduler/placement/placement_test.go
+++ b/pkg/scheduler/placement/placement_test.go
@@ -34,20 +34,20 @@ import (
 // basic test to check if no rules leave the manager unusable
 func TestManagerNew(t *testing.T) {
        // basic info without rules, manager should not init
-       man := NewPlacementManager(nil, queueFunc)
+       man := NewPlacementManager(nil, queueFunc, false)
        assert.Equal(t, 2, len(man.rules), "wrong rule count for new placement 
manager, no config")
        assert.Equal(t, types.Provided, man.rules[0].getName(), "wrong name for 
implicit provided rule")
        assert.Equal(t, types.Recovery, man.rules[1].getName(), "wrong name for 
implicit recovery rule")
 
        // fail update rules with unknown rule
        rules := []configs.PlacementRule{{Name: "unknown", Create: true}}
-       man = NewPlacementManager(rules, queueFunc)
+       man = NewPlacementManager(rules, queueFunc, false)
        assert.Equal(t, 0, len(man.rules), "wrong rule count for new placement manager, no config")
 }
 
 func TestManagerInit(t *testing.T) {
        // basic info without rules, manager should implicitly init
-       man := NewPlacementManager(nil, queueFunc)
+       man := NewPlacementManager(nil, queueFunc, false)
        assert.Equal(t, 2, len(man.rules), "wrong rule count for nil rules 
config")
        ruleDAOs := man.GetRulesDAO()
        assert.Equal(t, 2, len(ruleDAOs), "wrong DAO count for nil rules 
config")
@@ -56,7 +56,7 @@ func TestManagerInit(t *testing.T) {
 
        // try to init with empty list should do the same
        var rules []configs.PlacementRule
-       err := man.initialise(rules)
+       err := man.initialise(rules, false)
        assert.NilError(t, err, "Failed to initialize empty placement rules")
        assert.Equal(t, 2, len(man.rules), "wrong rule count for empty rules 
config")
        ruleDAOs = man.GetRulesDAO()
@@ -67,7 +67,7 @@ func TestManagerInit(t *testing.T) {
        rules = []configs.PlacementRule{
                {Name: "unknown"},
        }
-       err = man.initialise(rules)
+       err = man.initialise(rules, false)
        if err == nil {
                t.Error("initialise with 'unknown' rule list should have 
failed")
        }
@@ -81,7 +81,7 @@ func TestManagerInit(t *testing.T) {
        rules = []configs.PlacementRule{
                {Name: "test"},
        }
-       err = man.initialise(rules)
+       err = man.initialise(rules, false)
        assert.NilError(t, err, "failed to init existing manager")
        ruleDAOs = man.GetRulesDAO()
        assert.Equal(t, 2, len(ruleDAOs), "wrong DAO count for manager with 
test rule")
@@ -90,7 +90,7 @@ func TestManagerInit(t *testing.T) {
 
        // update the manager: remove rules implicit state is reverted
        rules = []configs.PlacementRule{}
-       err = man.initialise(rules)
+       err = man.initialise(rules, false)
        assert.NilError(t, err, "Failed to re-initialize empty placement rules")
        assert.Equal(t, 2, len(man.rules), "wrong rule count for empty rules 
config")
        ruleDAOs = man.GetRulesDAO()
@@ -99,7 +99,7 @@ func TestManagerInit(t *testing.T) {
        assert.Equal(t, ruleDAOs[1].Name, types.Recovery, "expected recovery rule as second rule")
 
        // check if we handle a nil list
-       err = man.initialise(nil)
+       err = man.initialise(nil, false)
        assert.NilError(t, err, "Failed to re-initialize nil placement rules")
        assert.Equal(t, 2, len(man.rules), "wrong rule count for nil rules 
config")
        ruleDAOs = man.GetRulesDAO()
@@ -117,7 +117,7 @@ func TestManagerInit(t *testing.T) {
                        },
                },
        }
-       err = man.initialise(rules)
+       err = man.initialise(rules, false)
        assert.NilError(t, err, "Failed to re-initialize nil placement rules")
        assert.Equal(t, 2, len(man.rules), "wrong rule count for nil rules 
config")
        ruleDAOs = man.GetRulesDAO()
@@ -128,7 +128,7 @@ func TestManagerInit(t *testing.T) {
 
 func TestManagerUpdate(t *testing.T) {
        // basic info without rules, manager should not init
-       man := NewPlacementManager(nil, queueFunc)
+       man := NewPlacementManager(nil, queueFunc, false)
        // update the manager
        rules := []configs.PlacementRule{
                {Name: "test"},
@@ -172,7 +172,7 @@ func TestManagerBuildRule(t *testing.T) {
        rules := []configs.PlacementRule{
                {Name: "test"},
        }
-       ruleObjs, err := buildRules(rules)
+       ruleObjs, err := buildRules(rules, false)
        if err != nil {
                t.Errorf("test rule build should not have failed, err: %v", err)
        }
@@ -188,7 +188,7 @@ func TestManagerBuildRule(t *testing.T) {
                        },
                },
        }
-       ruleObjs, err = buildRules(rules)
+       ruleObjs, err = buildRules(rules, false)
        if err != nil || len(ruleObjs) != 2 {
                t.Errorf("test rule build should not have failed and created 2 
top level rule, err: %v, rules: %v", err, ruleObjs)
        } else {
@@ -203,7 +203,7 @@ func TestManagerBuildRule(t *testing.T) {
                {Name: "user"},
                {Name: "test"},
        }
-       ruleObjs, err = buildRules(rules)
+       ruleObjs, err = buildRules(rules, false)
        if err != nil || len(ruleObjs) != 3 {
                t.Errorf("rule build should not have failed and created 3 
rules, err: %v, rules: %v", err, ruleObjs)
        } else if ruleObjs[0].getName() != rules[0].Name || ruleObjs[1].getName() != rules[1].Name || ruleObjs[2].getName() != "recovery" {
@@ -230,7 +230,7 @@ partitions:
        err := initQueueStructure([]byte(data))
        assert.NilError(t, err, "setting up the queue config failed")
        // basic info without rules, manager should init
-       man := NewPlacementManager(nil, queueFunc)
+       man := NewPlacementManager(nil, queueFunc, false)
        if man == nil {
                t.Fatal("placement manager create failed")
        }
@@ -362,7 +362,7 @@ partitions:
                        Value:  "namespace",
                        Create: true},
        }
-       man := NewPlacementManager(rules, queueFunc)
+       man := NewPlacementManager(rules, queueFunc, false)
        if man == nil {
                t.Fatal("placement manager create failed")
        }
@@ -503,7 +503,7 @@ partitions:
                        Value:  "root.custom",
                        Create: true},
        }
-       man1 := NewPlacementManager(rules, queueFunc)
+       man1 := NewPlacementManager(rules, queueFunc, false)
        if man1 == nil {
                t.Fatal("placement manager create failed")
        }
@@ -559,7 +559,7 @@ partitions:
        err := initQueueStructure([]byte(data))
        assert.NilError(t, err, "setting up the queue config failed")
        // basic info without rules, manager should init
-       man := NewPlacementManager(nil, queueFunc)
+       man := NewPlacementManager(nil, queueFunc, false)
        if man == nil {
                t.Fatal("placement manager create failed")
        }
diff --git a/pkg/scheduler/placement/testrule_test.go b/pkg/scheduler/placement/testrule_test.go
index 38bc24fe..71be5721 100644
--- a/pkg/scheduler/placement/testrule_test.go
+++ b/pkg/scheduler/placement/testrule_test.go
@@ -78,7 +78,7 @@ func initQueueStructure(data []byte) error {
                return err
        }
        rootConf := conf.Partitions[0].Queues[0]
-       root, err = objects.NewConfiguredQueue(rootConf, nil)
+       root, err = objects.NewConfiguredQueue(rootConf, nil, false)
        if err != nil {
                return err
        }
@@ -87,7 +87,7 @@ func initQueueStructure(data []byte) error {
 
 func addQueue(conf []configs.QueueConfig, parent *objects.Queue) error {
        for _, queueConf := range conf {
-               thisQueue, err := objects.NewConfiguredQueue(queueConf, parent)
+               thisQueue, err := objects.NewConfiguredQueue(queueConf, parent, false)
                if err != nil {
                        return err
                }
diff --git a/pkg/scheduler/utilities_test.go b/pkg/scheduler/utilities_test.go
index c3bd6c9a..dee25e9f 100644
--- a/pkg/scheduler/utilities_test.go
+++ b/pkg/scheduler/utilities_test.go
@@ -110,7 +110,7 @@ func newBasePartitionNoRootDefault() (*PartitionContext, error) {
                NodeSortPolicy: configs.NodeSortingPolicy{},
        }
 
-       return newPartitionContext(conf, rmID, nil)
+       return newPartitionContext(conf, rmID, nil, false)
 }
 
 func newBasePartition() (*PartitionContext, error) {
@@ -167,7 +167,7 @@ func newBasePartition() (*PartitionContext, error) {
                NodeSortPolicy: configs.NodeSortingPolicy{},
        }
 
-       return newPartitionContext(conf, rmID, nil)
+       return newPartitionContext(conf, rmID, nil, false)
 }
 
 func newConfiguredPartition() (*PartitionContext, error) {
@@ -265,7 +265,7 @@ func newConfiguredPartition() (*PartitionContext, error) {
                Limits:         nil,
                NodeSortPolicy: configs.NodeSortingPolicy{},
        }
-       return newPartitionContext(conf, rmID, nil)
+       return newPartitionContext(conf, rmID, nil, false)
 }
 
 func newPreemptionConfiguredPartition(parentLimit map[string]string, leafGuarantees map[string]string) (*PartitionContext, error) {
@@ -374,7 +374,7 @@ func newPreemptionConfiguredPartition(parentLimit map[string]string, leafGuarant
                PlacementRules: nil,
                NodeSortPolicy: configs.NodeSortingPolicy{},
        }
-       return newPartitionContext(conf, rmID, nil)
+       return newPartitionContext(conf, rmID, nil, false)
 }
 
 func newLimitedPartition(resLimit map[string]string) (*PartitionContext, error) {
@@ -435,7 +435,7 @@ func newLimitedPartition(resLimit map[string]string) (*PartitionContext, error)
                NodeSortPolicy: configs.NodeSortingPolicy{},
        }
 
-       return newPartitionContext(conf, rmID, nil)
+       return newPartitionContext(conf, rmID, nil, false)
 }
 
 func newPlacementPartition() (*PartitionContext, error) {
@@ -477,7 +477,7 @@ func newPlacementPartition() (*PartitionContext, error) {
                NodeSortPolicy: configs.NodeSortingPolicy{},
        }
 
-       return newPartitionContext(conf, rmID, nil)
+       return newPartitionContext(conf, rmID, nil, false)
 }
 
 func newApplication(appID, partition, queueName string) *objects.Application {


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

