This is an automated email from the ASF dual-hosted git repository.

ccondit pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/yunikorn-k8shim.git


The following commit(s) were added to refs/heads/master by this push:
     new ee8d1c01 [YUNIKORN-2896] [shim] Remove occupiedResource handling logic (#927)
ee8d1c01 is described below

commit ee8d1c01fd4d7660d9d6f7cf38686241dbfd9318
Author: Peter Bacsko <[email protected]>
AuthorDate: Thu Oct 17 14:07:15 2024 -0500

    [YUNIKORN-2896] [shim] Remove occupiedResource handling logic (#927)
    
    Closes: #927
    
    Signed-off-by: Craig Condit <[email protected]>
---
 go.mod                                     |  4 +-
 go.sum                                     |  8 +--
 pkg/cache/context.go                       | 31 +++--------
 pkg/cache/context_test.go                  | 23 ++------
 pkg/cache/external/scheduler_cache.go      | 83 -----------------------------
 pkg/cache/external/scheduler_cache_test.go | 85 ------------------------------
 pkg/common/si_helper.go                    |  5 +-
 pkg/common/si_helper_test.go               |  4 +-
 8 files changed, 20 insertions(+), 223 deletions(-)
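
The net effect: the shim no longer tracks "occupied" resources for foreign (non-YuniKorn) pods separately; foreign pods are reported to the core through regular allocation and release requests (see CreateReleaseRequestForForeignPod in pkg/common/si_helper.go), so a node update now carries only the schedulable capacity. For callers, the visible API change is that CreateUpdateRequestForUpdatedNode drops its occupied parameter. A minimal sketch of the new call shape (node name and resource values are illustrative, not from this commit):

    package main

    import (
            "fmt"

            "github.com/apache/yunikorn-k8shim/pkg/common"
    )

    func main() {
            // Capacity is all that is sent now; there is no occupied argument.
            capacity := common.NewResourceBuilder().
                    AddResource("memory", 2048*1000*1000). // bytes, illustrative
                    AddResource("vcore", 4000).            // millicores, illustrative
                    Build()

            request := common.CreateUpdateRequestForUpdatedNode("node-1", capacity)
            fmt.Println(len(request.Nodes)) // prints 1: a single node update
    }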

diff --git a/go.mod b/go.mod
index b5dbbe8c..195697f9 100644
--- a/go.mod
+++ b/go.mod
@@ -23,8 +23,8 @@ go 1.22.0
 toolchain go1.22.5
 
 require (
-       github.com/apache/yunikorn-core v0.0.0-20241003152125-4ea225160acf
-       github.com/apache/yunikorn-scheduler-interface v0.0.0-20240924203603-aaf51c93d3a0
+       github.com/apache/yunikorn-core v0.0.0-20241017135039-079a02dbdfa7
+       github.com/apache/yunikorn-scheduler-interface v0.0.0-20241016105739-f0e241aa0146
        github.com/google/go-cmp v0.6.0
        github.com/google/uuid v1.6.0
        github.com/looplab/fsm v1.0.1
diff --git a/go.sum b/go.sum
index 566b07b2..5cbb6496 100644
--- a/go.sum
+++ b/go.sum
@@ -8,10 +8,10 @@ github.com/NYTimes/gziphandler v1.1.1 h1:ZUDjpQae29j0ryrS0u/B8HZfJBtBQHjqw2rQ2cq
 github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c=
 github.com/antlr4-go/antlr/v4 v4.13.0 h1:lxCg3LAv+EUK6t1i0y1V6/SLeUi0eKEKdhQAlS8TVTI=
 github.com/antlr4-go/antlr/v4 v4.13.0/go.mod h1:pfChB/xh/Unjila75QW7+VU4TSnWnnk9UTnmpPaOR2g=
-github.com/apache/yunikorn-core v0.0.0-20241003152125-4ea225160acf h1:wKySiY4IA9Us287QRnIxFnuTHXaMSeQ3BhAwSrSW/sQ=
-github.com/apache/yunikorn-core v0.0.0-20241003152125-4ea225160acf/go.mod h1:q6OXYpCTGvMJxsEorpIF6icKM/IioMmU6KcsclV1kI0=
-github.com/apache/yunikorn-scheduler-interface v0.0.0-20240924203603-aaf51c93d3a0 h1:/9j0YXuifvoOl4YVEbO0r+DPkkYLzaQ+/ac+xCc7SY8=
-github.com/apache/yunikorn-scheduler-interface v0.0.0-20240924203603-aaf51c93d3a0/go.mod h1:co3uU98sj1CUTPNTM13lTyi+CY0DOgDndDW2KiUjktU=
+github.com/apache/yunikorn-core v0.0.0-20241017135039-079a02dbdfa7 h1:PY3kIiQYxsNcs42DK+8b7NxfTvMF0Z6eIuK+aJNWl18=
+github.com/apache/yunikorn-core v0.0.0-20241017135039-079a02dbdfa7/go.mod h1:JA8Uee+D+T9v3p+YznGiGM9cLk5tzX+EM+YYr1TdFYo=
+github.com/apache/yunikorn-scheduler-interface v0.0.0-20241016105739-f0e241aa0146 h1:CZ4U7y19YSxNJVBNox3DahhuoxDL++naBl/kj+kqVFc=
+github.com/apache/yunikorn-scheduler-interface v0.0.0-20241016105739-f0e241aa0146/go.mod h1:co3uU98sj1CUTPNTM13lTyi+CY0DOgDndDW2KiUjktU=
 github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio=
 github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs=
 github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a h1:idn718Q4B6AGu/h5Sxe66HYVdqdGu2l9Iebqhi/AEoA=
diff --git a/pkg/cache/context.go b/pkg/cache/context.go
index 613bbc52..28cac93f 100644
--- a/pkg/cache/context.go
+++ b/pkg/cache/context.go
@@ -217,10 +217,8 @@ func (ctx *Context) updateNodeInternal(node *v1.Node, register bool) {
 
                if !common.Equals(prevCapacity, newCapacity) {
                        // update capacity
-                       if capacity, occupied, ok := ctx.schedulerCache.UpdateCapacity(node.Name, newCapacity); ok {
-                               if err := ctx.updateNodeResources(node, capacity, occupied); err != nil {
-                                       log.Log(log.ShimContext).Warn("Failed to update node capacity", zap.Error(err))
-                               }
+                       if err := ctx.updateNodeResources(node, newCapacity); err != nil {
+                               log.Log(log.ShimContext).Warn("Failed to update node capacity", zap.Error(err))
                        } else {
                                log.Log(log.ShimContext).Warn("Failed to update cached node capacity", zap.String("nodeName", node.Name))
                        }
@@ -372,7 +370,7 @@ func (ctx *Context) updateForeignPod(pod *v1.Pod) {
        if utils.IsAssignedPod(pod) && !utils.IsPodTerminated(pod) {
                if ctx.schedulerCache.UpdatePod(pod) {
                        // pod was accepted by a real node
-                       log.Log(log.ShimContext).Debug("pod is assigned to a node, trigger occupied resource update",
+                       log.Log(log.ShimContext).Debug("pod is assigned to a node, trigger foreign resource update",
                                zap.String("namespace", pod.Namespace),
                                zap.String("podName", pod.Name),
                                zap.String("podStatusBefore", podStatusBefore),
@@ -398,7 +396,7 @@ func (ctx *Context) updateForeignPod(pod *v1.Pod) {
        //   3. pod references a known node
        if oldPod != nil && utils.IsPodTerminated(pod) {
                if !ctx.schedulerCache.IsPodOrphaned(string(pod.UID)) {
-                       log.Log(log.ShimContext).Debug("pod terminated, trigger occupied resource update",
+                       log.Log(log.ShimContext).Debug("pod terminated, trigger foreign resource update",
                                zap.String("namespace", pod.Namespace),
                                zap.String("podName", pod.Name),
                                zap.String("podStatusBefore", podStatusBefore),
@@ -413,7 +411,7 @@ func (ctx *Context) updateForeignPod(pod *v1.Pod) {
                        }
                } else {
                        // pod is orphaned (references an unknown node)
-                       log.Log(log.ShimContext).Info("skipping occupied resource update for terminated orphaned pod",
+                       log.Log(log.ShimContext).Info("skipping foreign resource update for terminated orphaned pod",
                                zap.String("namespace", pod.Namespace),
                                zap.String("podName", pod.Name),
                                zap.String("nodeName", pod.Spec.NodeName))
@@ -470,20 +468,6 @@ func (ctx *Context) deleteForeignPod(pod *v1.Pod) {
        ctx.schedulerCache.RemovePod(pod)
 }
 
-//nolint:unused
-func (ctx *Context) updateNodeOccupiedResources(nodeName string, namespace string, podName string, resource *si.Resource, opt schedulercache.UpdateType) {
-       if common.IsZero(resource) {
-               return
-       }
-       if node, capacity, occupied, ok := ctx.schedulerCache.UpdateOccupiedResource(nodeName, namespace, podName, resource, opt); ok {
-               if err := ctx.updateNodeResources(node, capacity, occupied); err != nil {
-                       log.Log(log.ShimContext).Warn("scheduler rejected update to node occupied resources", zap.Error(err))
-               }
-       } else {
-               log.Log(log.ShimContext).Warn("unable to update occupied resources for node", zap.String("nodeName", nodeName))
-       }
-}
-
 // filter configMap for the scheduler
 func (ctx *Context) filterConfigMaps(obj interface{}) bool {
        switch obj := obj.(type) {
@@ -1514,7 +1498,6 @@ func (ctx *Context) registerNodes(nodes []*v1.Node) ([]*v1.Node, error) {
                                constants.DefaultNodeAttributeRackNameKey: constants.DefaultRackName,
                        },
                        SchedulableResource: common.GetNodeResource(&nodeStatus),
-                       OccupiedResource:    common.NewResourceBuilder().Build(),
                })
                pendingNodes[node.Name] = node
        }
@@ -1596,8 +1579,8 @@ func (ctx *Context) decommissionNode(node *v1.Node) error {
        return ctx.apiProvider.GetAPIs().SchedulerAPI.UpdateNode(request)
 }
 
-func (ctx *Context) updateNodeResources(node *v1.Node, capacity *si.Resource, occupied *si.Resource) error {
-       request := common.CreateUpdateRequestForUpdatedNode(node.Name, capacity, nil)
+func (ctx *Context) updateNodeResources(node *v1.Node, capacity *si.Resource) error {
+       request := common.CreateUpdateRequestForUpdatedNode(node.Name, capacity)
        return ctx.apiProvider.GetAPIs().SchedulerAPI.UpdateNode(request)
 }
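
With the cache-side capacity/occupied bookkeeping gone, updateNodeInternal pushes a detected capacity change straight to the core, as the first hunk above shows. A condensed sketch of that flow (pushCapacityChange is a hypothetical stand-in for the inlined logic; the update callback plays the role of Context.updateNodeResources):

    package sketch

    import (
            v1 "k8s.io/api/core/v1"

            "github.com/apache/yunikorn-k8shim/pkg/common"
            "github.com/apache/yunikorn-scheduler-interface/lib/go/si"
    )

    // pushCapacityChange compares the previous and current node capacities
    // and, only when they differ, emits a capacity-only update. No
    // occupied-resource state is read or written anywhere in this path.
    func pushCapacityChange(oldNode, newNode *v1.Node, update func(*v1.Node, *si.Resource) error) error {
            prevCapacity := common.GetNodeResource(&oldNode.Status)
            newCapacity := common.GetNodeResource(&newNode.Status)
            if common.Equals(prevCapacity, newCapacity) {
                    return nil // capacity unchanged, nothing to report
            }
            return update(newNode, newCapacity)
    }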
 
diff --git a/pkg/cache/context_test.go b/pkg/cache/context_test.go
index a8214bcc..efeeffd7 100644
--- a/pkg/cache/context_test.go
+++ b/pkg/cache/context_test.go
@@ -36,9 +36,7 @@ import (
        k8sEvents "k8s.io/client-go/tools/events"
        "k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumebinding"
 
-       schedulercache "github.com/apache/yunikorn-k8shim/pkg/cache/external"
        "github.com/apache/yunikorn-k8shim/pkg/client"
-       "github.com/apache/yunikorn-k8shim/pkg/common"
        "github.com/apache/yunikorn-k8shim/pkg/common/constants"
        "github.com/apache/yunikorn-k8shim/pkg/common/events"
        "github.com/apache/yunikorn-k8shim/pkg/common/test"
@@ -183,8 +181,8 @@ func TestUpdateNodes(t *testing.T) {
        })
 
        oldNodeResource := make(map[v1.ResourceName]resource.Quantity)
-       oldNodeResource[v1.ResourceName("memory")] = *resource.NewQuantity(1024*1000*1000, resource.DecimalSI)
-       oldNodeResource[v1.ResourceName("cpu")] = *resource.NewQuantity(2, resource.DecimalSI)
+       oldNodeResource["memory"] = *resource.NewQuantity(1024*1000*1000, resource.DecimalSI)
+       oldNodeResource["cpu"] = *resource.NewQuantity(2, resource.DecimalSI)
        oldNode := v1.Node{
                ObjectMeta: apis.ObjectMeta{
                        Name:      Host1,
@@ -197,8 +195,8 @@ func TestUpdateNodes(t *testing.T) {
        }
 
        newNodeResource := make(map[v1.ResourceName]resource.Quantity)
-       newNodeResource[v1.ResourceName("memory")] = *resource.NewQuantity(2048*1000*1000, resource.DecimalSI)
-       newNodeResource[v1.ResourceName("cpu")] = *resource.NewQuantity(4, resource.DecimalSI)
+       newNodeResource["memory"] = *resource.NewQuantity(2048*1000*1000, resource.DecimalSI)
+       newNodeResource["cpu"] = *resource.NewQuantity(4, resource.DecimalSI)
        newNode := v1.Node{
                ObjectMeta: apis.ObjectMeta{
                        Name:      Host1,
@@ -212,12 +210,6 @@ func TestUpdateNodes(t *testing.T) {
 
        ctx.addNode(&oldNode)
        ctx.updateNode(&oldNode, &newNode)
-
-       _, capacity, _, ok := ctx.schedulerCache.UpdateOccupiedResource(
-               Host1, "n/a", "n/a", nil, schedulercache.AddOccupiedResource)
-       assert.Assert(t, ok, "unable to retrieve node capacity")
-       assert.Equal(t, int64(2048*1000*1000), capacity.Resources[siCommon.Memory].Value)
-       assert.Equal(t, int64(4000), capacity.Resources[siCommon.CPU].Value)
 }
 
 func TestDeleteNodes(t *testing.T) {
@@ -1915,13 +1907,6 @@ func TestInitializeState(t *testing.T) {
        assert.Equal(t, *pc.PreemptionPolicy, policy, "wrong preemption policy")
        assert.Equal(t, pc.Annotations[constants.AnnotationAllowPreemption], constants.True, "wrong allow-preemption value")
 
-       // verify occupied / capacity on node
-       capacity, _, ok := context.schedulerCache.SnapshotResources(nodeName1)
-       assert.Assert(t, ok, "Unable to retrieve node resources")
-       expectedCapacity := common.ParseResource("4", "10G")
-       assert.Equal(t, expectedCapacity.Resources["vcore"].Value, capacity.Resources["vcore"].Value, "wrong capacity vcore")
-       assert.Equal(t, expectedCapacity.Resources["memory"].Value, capacity.Resources["memory"].Value, "wrong capacity memory")
-
        // check that pod orphan status is correct
        assert.Check(t, !context.schedulerCache.IsPodOrphaned(podName1), "pod1 should not be orphaned")
        assert.Check(t, !context.schedulerCache.IsPodOrphaned(podName2), "pod2 should not be orphaned")
diff --git a/pkg/cache/external/scheduler_cache.go b/pkg/cache/external/scheduler_cache.go
index a7ef737f..fa4d8675 100644
--- a/pkg/cache/external/scheduler_cache.go
+++ b/pkg/cache/external/scheduler_cache.go
@@ -32,18 +32,9 @@ import (
        "k8s.io/kubernetes/pkg/scheduler/framework"
 
        "github.com/apache/yunikorn-k8shim/pkg/client"
-       "github.com/apache/yunikorn-k8shim/pkg/common"
        "github.com/apache/yunikorn-k8shim/pkg/common/utils"
        "github.com/apache/yunikorn-k8shim/pkg/locking"
        "github.com/apache/yunikorn-k8shim/pkg/log"
-       "github.com/apache/yunikorn-scheduler-interface/lib/go/si"
-)
-
-type UpdateType int
-
-const (
-       AddOccupiedResource UpdateType = iota
-       SubOccupiedResource
 )
 
 // SchedulerCache maintains some critical information about nodes and pods used for scheduling.
@@ -59,8 +50,6 @@ const (
 // is called in the plugin to signify completion of the allocation, it is removed.
 type SchedulerCache struct {
        nodesMap              map[string]*framework.NodeInfo // node name to NodeInfo map
-       nodeCapacity          map[string]*si.Resource        // node name to node resource capacity
-       nodeOccupied          map[string]*si.Resource        // node name to node occupied resources
        podsMap               map[string]*v1.Pod
        pcMap                 map[string]*schedulingv1.PriorityClass
        assignedPods          map[string]string      // map of pods to the node they are currently assigned to
@@ -90,8 +79,6 @@ type taskBloomFilter struct {
 func NewSchedulerCache(clients *client.Clients) *SchedulerCache {
        cache := &SchedulerCache{
                nodesMap:              make(map[string]*framework.NodeInfo),
-               nodeCapacity:          make(map[string]*si.Resource),
-               nodeOccupied:          make(map[string]*si.Resource),
                podsMap:               make(map[string]*v1.Pod),
                pcMap:                 make(map[string]*schedulingv1.PriorityClass),
                assignedPods:          make(map[string]string),
@@ -197,8 +184,6 @@ func (cache *SchedulerCache) updateNode(node *v1.Node) (*v1.Node, []*v1.Pod) {
                log.Log(log.ShimCacheExternal).Debug("Adding node to cache", zap.String("nodeName", node.Name))
                nodeInfo = framework.NewNodeInfo()
                cache.nodesMap[node.Name] = nodeInfo
-               cache.nodeCapacity[node.Name] = common.GetNodeResource(&node.Status)
-               cache.nodeOccupied[node.Name] = common.NewResourceBuilder().Build()
                cache.nodesInfo = nil
                nodeInfo.SetNode(node)
 
@@ -253,8 +238,6 @@ func (cache *SchedulerCache) removeNode(node *v1.Node) (*v1.Node, []*v1.Pod) {
 
        log.Log(log.ShimCacheExternal).Debug("Removing node from cache", 
zap.String("nodeName", node.Name))
        delete(cache.nodesMap, node.Name)
-       delete(cache.nodeOccupied, node.Name)
-       delete(cache.nodeCapacity, node.Name)
        cache.nodesInfo = nil
        cache.nodesInfoPodsWithAffinity = nil
        cache.nodesInfoPodsWithReqAntiAffinity = nil
@@ -263,72 +246,6 @@ func (cache *SchedulerCache) removeNode(node *v1.Node) (*v1.Node, []*v1.Pod) {
        return result, orphans
 }
 
-func (cache *SchedulerCache) SnapshotResources(nodeName string) (capacity *si.Resource, occupied *si.Resource, ok bool) {
-       cache.lock.RLock()
-       defer cache.lock.RUnlock()
-
-       occupied, ok1 := cache.nodeOccupied[nodeName]
-       capacity, ok2 := cache.nodeCapacity[nodeName]
-       if !ok1 || !ok2 {
-               log.Log(log.ShimCacheExternal).Warn("Unable to snapshot resources for node", zap.String("nodeName", nodeName))
-               return nil, nil, false
-       }
-       return capacity, occupied, true
-}
-
-func (cache *SchedulerCache) UpdateCapacity(nodeName string, resource *si.Resource) (capacity *si.Resource, occupied *si.Resource, ok bool) {
-       cache.lock.Lock()
-       defer cache.lock.Unlock()
-
-       occupied, ok1 := cache.nodeOccupied[nodeName]
-       _, ok2 := cache.nodeCapacity[nodeName]
-       if !ok1 || !ok2 {
-               log.Log(log.ShimCacheExternal).Warn("Unable to update capacity for node", zap.String("nodeName", nodeName))
-               return nil, nil, false
-       }
-       cache.nodeCapacity[nodeName] = resource
-       return resource, occupied, true
-}
-
-func (cache *SchedulerCache) UpdateOccupiedResource(nodeName string, namespace string, podName string, resource *si.Resource, opt UpdateType) (node *v1.Node, capacity *si.Resource, occupied *si.Resource, ok bool) {
-       cache.lock.Lock()
-       defer cache.lock.Unlock()
-
-       nodeInfo, ok1 := cache.nodesMap[nodeName]
-       occupied, ok2 := cache.nodeOccupied[nodeName]
-       capacity, ok3 := cache.nodeCapacity[nodeName]
-       if !ok1 || !ok2 || !ok3 {
-               log.Log(log.ShimCacheExternal).Warn("Unable to update occupied resources for node",
-                       zap.String("nodeName", nodeName),
-                       zap.String("namespace", namespace),
-                       zap.String("podName", podName))
-               return nil, nil, nil, false
-       }
-       node = nodeInfo.Node()
-
-       switch opt {
-       case AddOccupiedResource:
-               log.Log(log.ShimCacheExternal).Info("Adding occupied resources to node",
-                       zap.String("nodeID", nodeName),
-                       zap.String("namespace", namespace),
-                       zap.String("podName", podName),
-                       zap.Stringer("occupied", resource))
-               occupied = common.Add(occupied, resource)
-               cache.nodeOccupied[nodeName] = occupied
-       case SubOccupiedResource:
-               log.Log(log.ShimCacheExternal).Info("Subtracting occupied resources from node",
-                       zap.String("nodeID", nodeName),
-                       zap.String("namespace", namespace),
-                       zap.String("podName", podName),
-                       zap.Stringer("occupied", resource))
-               occupied = common.Sub(occupied, resource)
-               cache.nodeOccupied[nodeName] = occupied
-       default:
-               // noop
-       }
-       return node, capacity, occupied, true
-}
-
 func (cache *SchedulerCache) GetPriorityClass(name string) *schedulingv1.PriorityClass {
        cache.lock.RLock()
        defer cache.lock.RUnlock()
diff --git a/pkg/cache/external/scheduler_cache_test.go b/pkg/cache/external/scheduler_cache_test.go
index c443491b..f87464b6 100644
--- a/pkg/cache/external/scheduler_cache_test.go
+++ b/pkg/cache/external/scheduler_cache_test.go
@@ -31,7 +31,6 @@ import (
        "k8s.io/kubernetes/pkg/scheduler/framework"
 
        "github.com/apache/yunikorn-k8shim/pkg/client"
-       "github.com/apache/yunikorn-k8shim/pkg/common"
 )
 
 const (
@@ -1097,90 +1096,6 @@ func TestUpdatePVCRefCounts(t *testing.T) {
        assert.Check(t, !cache.IsPVCUsedByPods(framework.GetNamespacedName(pod2.Namespace, pvcName2)), "pvc2 is in pvcRefCounts")
 }
 
-func TestNodeResources(t *testing.T) {
-       cache := NewSchedulerCache(client.NewMockedAPIProvider(false).GetAPIs())
-       resourceList := make(map[v1.ResourceName]resource.Quantity)
-       resourceList["memory"] = *resource.NewQuantity(1024*1000*1000, resource.DecimalSI)
-       resourceList["cpu"] = *resource.NewQuantity(10, resource.DecimalSI)
-       node := &v1.Node{
-               ObjectMeta: apis.ObjectMeta{
-                       Name:      host1,
-                       Namespace: "default",
-                       UID:       nodeUID1,
-               },
-               Status: v1.NodeStatus{
-                       Allocatable: resourceList,
-               },
-               Spec: v1.NodeSpec{
-                       Unschedulable: false,
-               },
-       }
-       cache.UpdateNode(node)
-
-       // test snapshot with missing node
-       capacity, occupied, ok := cache.SnapshotResources("missing")
-       assert.Assert(t, !ok, "got result for missing host")
-       assert.Assert(t, capacity == nil, "got capacity for missing host")
-       assert.Assert(t, occupied == nil, "got occupied for missing host")
-
-       // test snapshot with existing, unoccupied node
-       capacity, occupied, ok = cache.SnapshotResources(host1)
-       assert.Assert(t, ok, "no result for host1")
-       assert.Equal(t, int64(1024*1000*1000), capacity.Resources["memory"].Value, "wrong memory capacity for host1")
-       assert.Equal(t, int64(10*1000), capacity.Resources["vcore"].Value, "wrong vcore capacity for host1")
-       assert.Equal(t, 0, len(occupied.Resources), "non-empty occupied resources")
-
-       res1 := common.NewResourceBuilder().AddResource("memory", 2048*1000*1000).AddResource("vcore", 20000).Build()
-       res2 := common.NewResourceBuilder().AddResource("memory", 512*1000*1000).AddResource("vcore", 5000).Build()
-
-       // update capacity with missing node
-       capacity, occupied, ok = cache.UpdateCapacity("missing", res1)
-       assert.Assert(t, !ok, "got result for missing host")
-       assert.Assert(t, capacity == nil, "got capacity for missing host")
-       assert.Assert(t, occupied == nil, "got occupied for missing host")
-
-       // update capacity with real node
-       capacity, occupied, ok = cache.UpdateCapacity(host1, res1)
-       assert.Assert(t, ok, "no result for host1")
-       assert.Equal(t, int64(2048*1000*1000), capacity.Resources["memory"].Value, "wrong memory capacity for host1")
-       assert.Equal(t, int64(20*1000), capacity.Resources["vcore"].Value, "wrong vcore capacity for host1")
-       assert.Equal(t, 0, len(occupied.Resources), "non-empty occupied resources")
-
-       // update occupied resources with missing node
-       node, capacity, occupied, ok = cache.UpdateOccupiedResource("missing", "default", "podName", res2, AddOccupiedResource)
-       assert.Assert(t, !ok, "got result for missing host")
-       assert.Assert(t, node == nil, "got node for missing host")
-       assert.Assert(t, capacity == nil, "got capacity for missing host")
-       assert.Assert(t, occupied == nil, "got occupied for missing host")
-
-       // update occupied resources with real node
-       node, capacity, occupied, ok = cache.UpdateOccupiedResource(host1, "default", "podName", res2, AddOccupiedResource)
-       assert.Assert(t, ok, "no result for host1")
-       assert.Equal(t, host1, node.Name, "wrong host name")
-       assert.Equal(t, int64(2048*1000*1000), capacity.Resources["memory"].Value, "wrong memory capacity for host1")
-       assert.Equal(t, int64(20*1000), capacity.Resources["vcore"].Value, "wrong vcore capacity for host1")
-       assert.Equal(t, int64(512*1000*1000), occupied.Resources["memory"].Value, "wrong memory occupied for host1")
-       assert.Equal(t, int64(5*1000), occupied.Resources["vcore"].Value, "wrong vcore occupied for host1")
-
-       // retrieve snapshot again
-       capacity, occupied, ok = cache.SnapshotResources(host1)
-       assert.Assert(t, ok, "no result for host1")
-       assert.Equal(t, host1, node.Name, "wrong host name")
-       assert.Equal(t, int64(2048*1000*1000), capacity.Resources["memory"].Value, "wrong memory capacity for host1")
-       assert.Equal(t, int64(20*1000), capacity.Resources["vcore"].Value, "wrong vcore capacity for host1")
-       assert.Equal(t, int64(512*1000*1000), occupied.Resources["memory"].Value, "wrong memory occupied for host1")
-       assert.Equal(t, int64(5*1000), occupied.Resources["vcore"].Value, "wrong vcore occupied for host1")
-
-       // subtract occupied resources with real node
-       node, capacity, occupied, ok = cache.UpdateOccupiedResource(host1, "default", "podName", res2, SubOccupiedResource)
-       assert.Assert(t, ok, "no result for host1")
-       assert.Equal(t, host1, node.Name, "wrong host name")
-       assert.Equal(t, int64(2048*1000*1000), capacity.Resources["memory"].Value, "wrong memory capacity for host1")
-       assert.Equal(t, int64(20*1000), capacity.Resources["vcore"].Value, "wrong vcore capacity for host1")
-       assert.Equal(t, int64(0), occupied.Resources["memory"].Value, "wrong memory occupied for host1")
-       assert.Equal(t, int64(0), occupied.Resources["vcore"].Value, "wrong vcore occupied for host1")
-}
-
 func TestOrphanPods(t *testing.T) {
        cache := NewSchedulerCache(client.NewMockedAPIProvider(false).GetAPIs())
        resourceList := make(map[v1.ResourceName]resource.Quantity)
diff --git a/pkg/common/si_helper.go b/pkg/common/si_helper.go
index d2ef9a5c..272e18a1 100644
--- a/pkg/common/si_helper.go
+++ b/pkg/common/si_helper.go
@@ -187,13 +187,12 @@ func CreateReleaseRequestForForeignPod(uid, partition string) *si.AllocationRequ
        }
 }
 
-// CreateUpdateRequestForUpdatedNode builds a NodeRequest for capacity and occupied resource updates
-func CreateUpdateRequestForUpdatedNode(nodeID string, capacity *si.Resource, occupied *si.Resource) *si.NodeRequest {
+// CreateUpdateRequestForUpdatedNode builds a NodeRequest for capacity updates
+func CreateUpdateRequestForUpdatedNode(nodeID string, capacity *si.Resource) *si.NodeRequest {
        nodeInfo := &si.NodeInfo{
                NodeID:              nodeID,
                Attributes:          map[string]string{},
                SchedulableResource: capacity,
-               OccupiedResource:    occupied,
                Action:              si.NodeInfo_UPDATE,
        }
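
On the wire, the NodeInfo built by this helper now carries the schedulable resource only; no occupied-resource figure is reported for the node. Roughly (excerpt-style sketch, with nodeID and capacity as in the surrounding code):

    nodeInfo := &si.NodeInfo{
            NodeID:              nodeID,
            Attributes:          map[string]string{},
            SchedulableResource: capacity,
            // occupied resources are no longer populated by the shim
            Action:              si.NodeInfo_UPDATE,
    }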
 
diff --git a/pkg/common/si_helper_test.go b/pkg/common/si_helper_test.go
index 0439f7b3..9943f55c 100644
--- a/pkg/common/si_helper_test.go
+++ b/pkg/common/si_helper_test.go
@@ -211,12 +211,10 @@ func TestCreateTagsForTask(t *testing.T) {
 
 func TestCreateUpdateRequestForUpdatedNode(t *testing.T) {
        capacity := NewResourceBuilder().AddResource(common.Memory, 200).AddResource(common.CPU, 2).Build()
-       occupied := NewResourceBuilder().AddResource(common.Memory, 50).AddResource(common.CPU, 1).Build()
-       request := CreateUpdateRequestForUpdatedNode(nodeID, capacity, occupied)
+       request := CreateUpdateRequestForUpdatedNode(nodeID, capacity)
        assert.Equal(t, len(request.Nodes), 1)
        assert.Equal(t, request.Nodes[0].NodeID, nodeID)
        assert.Equal(t, request.Nodes[0].SchedulableResource, capacity)
-       assert.Equal(t, request.Nodes[0].OccupiedResource, occupied)
        assert.Equal(t, len(request.Nodes[0].Attributes), 0)
 }
 


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]
