This is an automated email from the ASF dual-hosted git repository.

pbacsko pushed a commit to branch YUNIKORN-2834_add
in repository https://gitbox.apache.org/repos/asf/yunikorn-k8shim.git

commit 6152e8cd18f76ced2e92f76ea7aaba7f0f2e8e00
Author: Peter Bacsko <[email protected]>
AuthorDate: Tue Aug 27 08:21:15 2024 +0200

    [YUNIKORN-2834] [shim] Add non-YuniKorn allocation tracking logic
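    
    Foreign (non-YuniKorn) pods are no longer tracked as per-node occupied
    resources in the shim's scheduler cache. Instead, the shim reports them
    to the core through the scheduler interface: an assigned foreign pod is
    sent with UpdateAllocation() as an allocation whose siCommon.Foreign tag
    is AllocTypeDefault (or AllocTypeStatic when the pod is owned by a Node,
    i.e. a static pod), and a terminated or deleted foreign pod is sent as
    an allocation release. The occupied-resource plumbing in the cache
    (UpdateCapacity()/UpdateOccupiedResource()) is kept for now but is no
    longer driven from updateForeignPod() and deleteForeignPod().
    
    A minimal sketch of the new flow, using the helpers this patch adds in
    pkg/common/si_helper.go ("api" below is shorthand for the shim's
    ctx.apiProvider.GetAPIs().SchedulerAPI; error handling as in the patch):
    
        // foreign pod assigned to a node: register it with the core
        allocReq := common.CreateAllocationForForeignPod(pod)
        if err := api.UpdateAllocation(allocReq); err != nil {
            log.Log(log.ShimContext).Error("failed to add foreign allocation to the core", zap.Error(err))
        }
    
        // foreign pod terminated or deleted: release its allocation in the core
        releaseReq := common.CreateReleaseRequestForForeignPod(string(pod.UID), constants.DefaultPartition)
        if err := api.UpdateAllocation(releaseReq); err != nil {
            log.Log(log.ShimContext).Error("failed to remove foreign allocation from the core", zap.Error(err))
        }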
---
 go.mod                                     |   4 +-
 go.sum                                     |   8 +-
 pkg/cache/context.go                       |  58 +++----
 pkg/cache/context_test.go                  | 252 +++++++++--------------------
 pkg/cache/external/scheduler_cache.go      |   6 -
 pkg/cache/external/scheduler_cache_test.go |   9 --
 pkg/common/constants/constants.go          |   1 +
 pkg/common/si_helper.go                    |  47 ++++++
 pkg/shim/scheduler_mock_test.go            |   1 -
 9 files changed, 149 insertions(+), 237 deletions(-)

diff --git a/go.mod b/go.mod
index 13ad673e..fc0ffd5d 100644
--- a/go.mod
+++ b/go.mod
@@ -23,8 +23,8 @@ go 1.22.0
 toolchain go1.22.5
 
 require (
-       github.com/apache/yunikorn-core v0.0.0-20240908061623-6f06490bcfa3
-       github.com/apache/yunikorn-scheduler-interface v0.0.0-20240827015655-68e8c6cca28a
+       github.com/apache/yunikorn-core v0.0.0-20241002095736-a2d3d43a145d
+       github.com/apache/yunikorn-scheduler-interface v0.0.0-20240924203603-aaf51c93d3a0
        github.com/google/go-cmp v0.6.0
        github.com/google/uuid v1.6.0
        github.com/looplab/fsm v1.0.1
diff --git a/go.sum b/go.sum
index f1203810..c9f1c1bd 100644
--- a/go.sum
+++ b/go.sum
@@ -8,10 +8,10 @@ github.com/NYTimes/gziphandler v1.1.1 h1:ZUDjpQae29j0ryrS0u/B8HZfJBtBQHjqw2rQ2cq
 github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c=
 github.com/antlr4-go/antlr/v4 v4.13.0 h1:lxCg3LAv+EUK6t1i0y1V6/SLeUi0eKEKdhQAlS8TVTI=
 github.com/antlr4-go/antlr/v4 v4.13.0/go.mod h1:pfChB/xh/Unjila75QW7+VU4TSnWnnk9UTnmpPaOR2g=
-github.com/apache/yunikorn-core v0.0.0-20240908061623-6f06490bcfa3 h1:ySu0cpFSYFGNtf+PZw4ulzO+cWOyJMYJs+AjmwGWM80=
-github.com/apache/yunikorn-core v0.0.0-20240908061623-6f06490bcfa3/go.mod h1:HYeyzHhZt43oG54pasKHrwHM+Jeji8nFoAE2bcLWLYg=
-github.com/apache/yunikorn-scheduler-interface v0.0.0-20240827015655-68e8c6cca28a h1:3WRXGTvhunGBZj8AVZDxx7Bs/AXiH9mvf2jYcuDyklA=
-github.com/apache/yunikorn-scheduler-interface v0.0.0-20240827015655-68e8c6cca28a/go.mod h1:co3uU98sj1CUTPNTM13lTyi+CY0DOgDndDW2KiUjktU=
+github.com/apache/yunikorn-core v0.0.0-20241002095736-a2d3d43a145d h1:awo2goBrw25P1aFNZgYJ0q7V+5ycMqMhvI60B75OzQg=
+github.com/apache/yunikorn-core v0.0.0-20241002095736-a2d3d43a145d/go.mod h1:q6OXYpCTGvMJxsEorpIF6icKM/IioMmU6KcsclV1kI0=
+github.com/apache/yunikorn-scheduler-interface v0.0.0-20240924203603-aaf51c93d3a0 h1:/9j0YXuifvoOl4YVEbO0r+DPkkYLzaQ+/ac+xCc7SY8=
+github.com/apache/yunikorn-scheduler-interface v0.0.0-20240924203603-aaf51c93d3a0/go.mod h1:co3uU98sj1CUTPNTM13lTyi+CY0DOgDndDW2KiUjktU=
 github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio=
 github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs=
 github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a h1:idn718Q4B6AGu/h5Sxe66HYVdqdGu2l9Iebqhi/AEoA=
diff --git a/pkg/cache/context.go b/pkg/cache/context.go
index aaea690a..1d013cf3 100644
--- a/pkg/cache/context.go
+++ b/pkg/cache/context.go
@@ -215,12 +215,8 @@ func (ctx *Context) updateNodeInternal(node *v1.Node, register bool) {
 
                if !common.Equals(prevCapacity, newCapacity) {
                        // update capacity
-                       if capacity, occupied, ok := ctx.schedulerCache.UpdateCapacity(node.Name, newCapacity); ok {
-                               if err := ctx.updateNodeResources(node, capacity, occupied); err != nil {
-                                       log.Log(log.ShimContext).Warn("Failed to update node capacity", zap.Error(err))
-                               }
-                       } else {
-                               log.Log(log.ShimContext).Warn("Failed to update cached node capacity", zap.String("nodeName", node.Name))
+                       if err := ctx.updateNodeResources(node, newCapacity, nil); err != nil {
+                               log.Log(log.ShimContext).Warn("Failed to update node capacity", zap.Error(err))
                        }
                }
        }
@@ -370,7 +366,11 @@ func (ctx *Context) updateForeignPod(pod *v1.Pod) {
                                zap.String("podName", pod.Name),
                                zap.String("podStatusBefore", podStatusBefore),
                                zap.String("podStatusCurrent", string(pod.Status.Phase)))
-                       ctx.updateNodeOccupiedResources(pod.Spec.NodeName, pod.Namespace, pod.Name, common.GetPodResource(pod), schedulercache.AddOccupiedResource)
+                       allocReq := common.CreateAllocationForForeignPod(pod)
+                       if err := ctx.apiProvider.GetAPIs().SchedulerAPI.UpdateAllocation(allocReq); err != nil {
+                               log.Log(log.ShimContext).Error("failed to add foreign allocation to the core",
+                                       zap.Error(err))
+                       }
                } else {
                        // pod is orphaned (references an unknown node)
                        log.Log(log.ShimContext).Info("skipping occupied resource update for assigned orphaned pod",
@@ -394,8 +394,12 @@ func (ctx *Context) updateForeignPod(pod *v1.Pod) {
                                zap.String("podStatusCurrent", string(pod.Status.Phase)))
                        // this means pod is terminated
                        // we need sub the occupied resource and re-sync with the scheduler-core
-                       ctx.updateNodeOccupiedResources(pod.Spec.NodeName, pod.Namespace, pod.Name, common.GetPodResource(pod), schedulercache.SubOccupiedResource)
                        ctx.schedulerCache.RemovePod(pod)
+                       releaseReq := common.CreateReleaseRequestForForeignPod(string(pod.UID), constants.DefaultPartition)
+                       if err := ctx.apiProvider.GetAPIs().SchedulerAPI.UpdateAllocation(releaseReq); err != nil {
+                               log.Log(log.ShimContext).Error("failed to remove foreign allocation from the core",
+                                       zap.Error(err))
+                       }
                } else {
                        // pod is orphaned (references an unknown node)
                        log.Log(log.ShimContext).Info("skipping occupied resource update for terminated orphaned pod",
@@ -441,38 +445,14 @@ func (ctx *Context) deleteYuniKornPod(pod *v1.Pod) {
 }
 
 func (ctx *Context) deleteForeignPod(pod *v1.Pod) {
-       oldPod := ctx.schedulerCache.GetPod(string(pod.UID))
-       if oldPod == nil {
-               // if pod is not in scheduler cache, no node updates are needed
-               log.Log(log.ShimContext).Debug("unknown foreign pod deleted, no resource updated needed",
-                       zap.String("namespace", pod.Namespace),
-                       zap.String("podName", pod.Name))
-               return
+       releaseReq := common.CreateReleaseRequestForForeignPod(string(pod.UID), constants.DefaultPartition)
+       if err := ctx.apiProvider.GetAPIs().SchedulerAPI.UpdateAllocation(releaseReq); err != nil {
+               log.Log(log.ShimContext).Error("failed to remove foreign allocation from the core",
+                       zap.Error(err))
        }
 
-       // conditions for release:
-       //   1. pod is already assigned to a node
-       //   2. pod was not in a terminal state before
-       //   3. pod references a known node
-       if !utils.IsPodTerminated(oldPod) {
-               if !ctx.schedulerCache.IsPodOrphaned(string(oldPod.UID)) {
-                       log.Log(log.ShimContext).Debug("foreign pod deleted, triggering occupied resource update",
-                               zap.String("namespace", pod.Namespace),
-                               zap.String("podName", pod.Name),
-                               zap.String("podStatusBefore", string(oldPod.Status.Phase)),
-                               zap.String("podStatusCurrent", string(pod.Status.Phase)))
-                       // this means pod is terminated
-                       // we need sub the occupied resource and re-sync with the scheduler-core
-                       ctx.updateNodeOccupiedResources(pod.Spec.NodeName, pod.Namespace, pod.Name, common.GetPodResource(pod), schedulercache.SubOccupiedResource)
-               } else {
-                       // pod is orphaned (references an unknown node)
-                       log.Log(log.ShimContext).Info("skipping occupied resource update for removed orphaned pod",
-                               zap.String("namespace", pod.Namespace),
-                               zap.String("podName", pod.Name),
-                               zap.String("nodeName", pod.Spec.NodeName))
-               }
-               ctx.schedulerCache.RemovePod(pod)
-       }
+       log.Log(log.ShimContext).Debug("removing pod from cache", zap.String("podName", pod.Name))
+       ctx.schedulerCache.RemovePod(pod)
 }
 
 func (ctx *Context) updateNodeOccupiedResources(nodeName string, namespace string, podName string, resource *si.Resource, opt schedulercache.UpdateType) {
@@ -1560,7 +1540,7 @@ func (ctx *Context) decommissionNode(node *v1.Node) error {
 }
 
 func (ctx *Context) updateNodeResources(node *v1.Node, capacity *si.Resource, occupied *si.Resource) error {
-       request := common.CreateUpdateRequestForUpdatedNode(node.Name, capacity, occupied)
+       request := common.CreateUpdateRequestForUpdatedNode(node.Name, capacity, nil)
        return ctx.apiProvider.GetAPIs().SchedulerAPI.UpdateNode(request)
 }
 
diff --git a/pkg/cache/context_test.go b/pkg/cache/context_test.go
index 6d7bfafa..d82e3b17 100644
--- a/pkg/cache/context_test.go
+++ b/pkg/cache/context_test.go
@@ -35,9 +35,7 @@ import (
        k8sEvents "k8s.io/client-go/tools/events"
        "k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumebinding"
 
-       schedulercache "github.com/apache/yunikorn-k8shim/pkg/cache/external"
        "github.com/apache/yunikorn-k8shim/pkg/client"
-       "github.com/apache/yunikorn-k8shim/pkg/common"
        "github.com/apache/yunikorn-k8shim/pkg/common/constants"
        "github.com/apache/yunikorn-k8shim/pkg/common/events"
        "github.com/apache/yunikorn-k8shim/pkg/common/test"
@@ -182,8 +180,8 @@ func TestUpdateNodes(t *testing.T) {
        })
 
        oldNodeResource := make(map[v1.ResourceName]resource.Quantity)
-       oldNodeResource[v1.ResourceName("memory")] = 
*resource.NewQuantity(1024*1000*1000, resource.DecimalSI)
-       oldNodeResource[v1.ResourceName("cpu")] = *resource.NewQuantity(2, 
resource.DecimalSI)
+       oldNodeResource["memory"] = *resource.NewQuantity(1024*1000*1000, 
resource.DecimalSI)
+       oldNodeResource["cpu"] = *resource.NewQuantity(2, resource.DecimalSI)
        oldNode := v1.Node{
                ObjectMeta: apis.ObjectMeta{
                        Name:      Host1,
@@ -196,8 +194,8 @@ func TestUpdateNodes(t *testing.T) {
        }
 
        newNodeResource := make(map[v1.ResourceName]resource.Quantity)
-       newNodeResource[v1.ResourceName("memory")] = 
*resource.NewQuantity(2048*1000*1000, resource.DecimalSI)
-       newNodeResource[v1.ResourceName("cpu")] = *resource.NewQuantity(4, 
resource.DecimalSI)
+       newNodeResource["memory"] = *resource.NewQuantity(2048*1000*1000, 
resource.DecimalSI)
+       newNodeResource["cpu"] = *resource.NewQuantity(4, resource.DecimalSI)
        newNode := v1.Node{
                ObjectMeta: apis.ObjectMeta{
                        Name:      Host1,
@@ -211,12 +209,6 @@ func TestUpdateNodes(t *testing.T) {
 
        ctx.addNode(&oldNode)
        ctx.updateNode(&oldNode, &newNode)
-
-       _, capacity, _, ok := ctx.schedulerCache.UpdateOccupiedResource(
-               Host1, "n/a", "n/a", nil, schedulercache.AddOccupiedResource)
-       assert.Assert(t, ok, "unable to retrieve node capacity")
-       assert.Equal(t, int64(2048*1000*1000), capacity.Resources[siCommon.Memory].Value)
-       assert.Equal(t, int64(4000), capacity.Resources[siCommon.CPU].Value)
 }
 
 func TestDeleteNodes(t *testing.T) {
@@ -529,39 +521,11 @@ func TestAddUpdatePodForeign(t *testing.T) {
        defer dispatcher.UnregisterAllEventHandlers()
        defer dispatcher.Stop()
 
-       executed := false
-       expectAdd := false
-       expectRemove := false
-       tc := ""
-
-       validatorFunc := func(request *si.NodeRequest) error {
-               assert.Equal(t, len(request.Nodes), 1, "%s: wrong node count", tc)
-               updatedNode := request.Nodes[0]
-               assert.Equal(t, updatedNode.NodeID, Host1, "%s: wrong nodeID", tc)
-               switch updatedNode.Action {
-               case si.NodeInfo_CREATE_DRAIN:
-                       return nil
-               case si.NodeInfo_DRAIN_TO_SCHEDULABLE:
-                       return nil
-               case si.NodeInfo_UPDATE:
-                       executed = true
-               default:
-                       assert.Equal(t, false, "Unexpected action: %d", updatedNode.Action)
-                       return nil
-               }
-               assert.Equal(t, updatedNode.SchedulableResource.Resources[siCommon.Memory].Value, int64(10000*1000*1000), "%s: wrong schedulable memory", tc)
-               assert.Equal(t, updatedNode.SchedulableResource.Resources[siCommon.CPU].Value, int64(10000), "%s: wrong schedulable cpu", tc)
-               if expectAdd {
-                       assert.Equal(t, updatedNode.OccupiedResource.Resources[siCommon.Memory].Value, int64(1000*1000*1000), "%s: wrong occupied memory (add)", tc)
-                       assert.Equal(t, updatedNode.OccupiedResource.Resources[siCommon.CPU].Value, int64(500), "%s: wrong occupied cpu (add)", tc)
-               }
-               if expectRemove {
-                       assert.Equal(t, updatedNode.OccupiedResource.Resources[siCommon.Memory].Value, int64(0), "%s: wrong occupied memory (remove)", tc)
-                       assert.Equal(t, updatedNode.OccupiedResource.Resources[siCommon.CPU].Value, int64(0), "%s: wrong occupied cpu (remove)", tc)
-               }
+       var allocRequest *si.AllocationRequest
+       apiProvider.MockSchedulerAPIUpdateAllocationFn(func(request *si.AllocationRequest) error {
+               allocRequest = request
                return nil
-       }
-
+       })
        apiProvider.MockSchedulerAPIUpdateNodeFn(func(request *si.NodeRequest) error {
                for _, node := range request.Nodes {
                        if node.Action == si.NodeInfo_CREATE_DRAIN {
                for _, node := range request.Nodes {
                        if node.Action == si.NodeInfo_CREATE_DRAIN {
@@ -571,33 +535,33 @@ func TestAddUpdatePodForeign(t *testing.T) {
                                })
                        }
                }
-               return validatorFunc(request)
+               return nil
        })
-
-       host1 := nodeForTest(Host1, "10G", "10")
-       context.updateNode(nil, host1)
+       node := v1.Node{
+               ObjectMeta: apis.ObjectMeta{
+                       Name:      Host1,
+                       Namespace: "default",
+                       UID:       uid1,
+               },
+       }
+       context.addNode(&node)
 
        // pod is not assigned to any node
        pod1 := foreignPod(podName1, "1G", "500m")
        pod1.Status.Phase = v1.PodPending
        pod1.Spec.NodeName = ""
 
-       // validate add
-       tc = "add-pod1"
-       executed = false
-       expectAdd = false
-       expectRemove = false
+       // validate add (pending, no node assigned)
+       allocRequest = nil
        context.AddPod(pod1)
-       assert.Assert(t, !executed, "unexpected update")
+       assert.Assert(t, allocRequest == nil, "unexpected update")
        pod := context.schedulerCache.GetPod(string(pod1.UID))
        assert.Assert(t, pod == nil, "unassigned pod found in cache")
 
-       // validate update
-       tc = "update-pod1"
-       executed = false
-       expectRemove = false
+       // validate update (no change)
+       allocRequest = nil
        context.UpdatePod(nil, pod1)
-       assert.Assert(t, !executed, "unexpected update")
+       assert.Assert(t, allocRequest == nil, "unexpected update")
        assert.Assert(t, pod == nil, "unassigned pod found in cache")
 
        // pod is assigned to a node but still in pending state, should update
@@ -606,155 +570,101 @@ func TestAddUpdatePodForeign(t *testing.T) {
        pod2.Spec.NodeName = Host1
 
        // validate add
-       tc = "add-pod2"
-       executed = false
-       expectAdd = true
-       expectRemove = false
        context.AddPod(pod2)
-       assert.Assert(t, executed, "updated expected")
+       assert.Assert(t, allocRequest != nil, "update expected")
+       assertAddForeignPod(t, podName2, Host1, allocRequest)
        pod = context.schedulerCache.GetPod(string(pod2.UID))
        assert.Assert(t, pod != nil, "pod not found in cache")
 
-       // validate update
-       tc = "update-pod2"
-       executed = false
-       expectAdd = false
-       expectRemove = false
+       // validate update (no change)
+       allocRequest = nil
        context.UpdatePod(nil, pod2)
-       assert.Assert(t, !executed, "unexpected update")
+       assert.Assert(t, allocRequest == nil, "unexpected update")
        pod = context.schedulerCache.GetPod(string(pod2.UID))
        assert.Assert(t, pod != nil, "pod not found in cache")
 
        // validate update when not already in cache
-       tc = "update-pod2-nocache-pre"
-       executed = false
-       expectAdd = false
-       expectRemove = true
+       allocRequest = nil
        context.DeletePod(pod2)
-       assert.Assert(t, executed, "expected update")
-       tc = "update-pod2-nocache"
-       executed = false
-       expectAdd = true
-       expectRemove = false
+       assertReleaseForeignPod(t, podName2, allocRequest)
+
+       allocRequest = nil
        context.UpdatePod(nil, pod2)
-       assert.Assert(t, executed, "expected update")
+       assert.Assert(t, allocRequest != nil, "expected update")
        pod = context.schedulerCache.GetPod(string(pod2.UID))
        assert.Assert(t, pod != nil, "pod not found in cache")
+       assertAddForeignPod(t, podName2, Host1, allocRequest)
 
        // pod is failed, should trigger update if already in cache
        pod3 := pod2.DeepCopy()
        pod3.Status.Phase = v1.PodFailed
 
        // validate add
-       tc = "add-pod3"
-       executed = false
-       expectAdd = false
-       expectRemove = true
+       allocRequest = nil
        context.AddPod(pod3)
-       assert.Assert(t, executed, "expected update")
+       assert.Assert(t, allocRequest != nil, "expected update")
        pod = context.schedulerCache.GetPod(string(pod3.UID))
        assert.Assert(t, pod == nil, "failed pod found in cache")
+       assert.Assert(t, allocRequest.Releases != nil) // expecting a release due to pod status
+       assertReleaseForeignPod(t, podName2, allocRequest)
 
        // validate update when not already in cache
-       tc = "update-pod3-pre"
-       executed = false
-       expectAdd = true
-       expectRemove = false
+       allocRequest = nil
        context.AddPod(pod2)
-       tc = "update-pod3"
-       executed = false
-       expectAdd = false
-       expectRemove = true
+       assert.Assert(t, allocRequest != nil, "expected update")
+       allocRequest = nil
        context.UpdatePod(nil, pod3)
-       assert.Assert(t, executed, "expected update")
+       assert.Assert(t, allocRequest != nil, "expected update")
        pod = context.schedulerCache.GetPod(string(pod3.UID))
        assert.Assert(t, pod == nil, "failed pod found in cache")
 }
 
+func assertAddForeignPod(t *testing.T, podName, host string, allocRequest *si.AllocationRequest) {
+       t.Helper()
+       assert.Equal(t, 1, len(allocRequest.Allocations))
+       tags := allocRequest.Allocations[0].AllocationTags
+       assert.Equal(t, 2, len(tags))
+       assert.Equal(t, siCommon.AllocTypeDefault, tags[siCommon.Foreign])
+       assert.Equal(t, podName, allocRequest.Allocations[0].AllocationKey)
+       assert.Equal(t, host, allocRequest.Allocations[0].NodeID)
+}
+
+func assertReleaseForeignPod(t *testing.T, podName string, allocRequest *si.AllocationRequest) {
+       t.Helper()
+       assert.Assert(t, allocRequest.Releases != nil) // expecting a release due to pod status
+       assert.Equal(t, 1, len(allocRequest.Releases.AllocationsToRelease))
+       assert.Equal(t, podName, allocRequest.Releases.AllocationsToRelease[0].AllocationKey)
+       assert.Equal(t, constants.DefaultPartition, allocRequest.Releases.AllocationsToRelease[0].PartitionName)
+       assert.Equal(t, "", allocRequest.Releases.AllocationsToRelease[0].ApplicationID)
+       assert.Equal(t, si.TerminationType_STOPPED_BY_RM, allocRequest.Releases.AllocationsToRelease[0].TerminationType)
+}
+
 func TestDeletePodForeign(t *testing.T) {
        context, apiProvider := initContextAndAPIProviderForTest()
-       dispatcher.Start()
-       defer dispatcher.UnregisterAllEventHandlers()
-       defer dispatcher.Stop()
 
-       executed := false
-       expectAdd := false
-       expectRemove := false
-       tc := ""
-
-       validatorFunc := func(request *si.NodeRequest) error {
-               executed = true
-               assert.Equal(t, len(request.Nodes), 1, "%s: wrong node count", 
tc)
-               updatedNode := request.Nodes[0]
-               switch updatedNode.Action {
-               case si.NodeInfo_CREATE_DRAIN:
-                       return nil
-               case si.NodeInfo_DRAIN_TO_SCHEDULABLE:
-                       return nil
-               case si.NodeInfo_UPDATE:
-                       executed = true
-               default:
-                       assert.Equal(t, false, "Unexpected action: %d", 
updatedNode.Action)
-                       return nil
-               }
-               assert.Equal(t, updatedNode.NodeID, Host1, "%s: wrong nodeID", 
tc)
-               assert.Equal(t, updatedNode.Action, si.NodeInfo_UPDATE, "%s: 
wrong action", tc)
-               assert.Equal(t, 
updatedNode.SchedulableResource.Resources[siCommon.Memory].Value, 
int64(10000*1000*1000), "%s: wrong schedulable memory", tc)
-               assert.Equal(t, 
updatedNode.SchedulableResource.Resources[siCommon.CPU].Value, int64(10000), 
"%s: wrong schedulable cpu", tc)
-               if expectAdd {
-                       assert.Equal(t, 
updatedNode.OccupiedResource.Resources[siCommon.Memory].Value, 
int64(1000*1000*1000), "%s: wrong occupied memory (add)", tc)
-                       assert.Equal(t, 
updatedNode.OccupiedResource.Resources[siCommon.CPU].Value, int64(500), "%s: 
wrong occupied cpu (add)", tc)
-               }
-               if expectRemove {
-                       assert.Equal(t, 
updatedNode.OccupiedResource.Resources[siCommon.Memory].Value, int64(0), "%s: 
wrong occupied memory (remove)", tc)
-                       assert.Equal(t, 
updatedNode.OccupiedResource.Resources[siCommon.CPU].Value, int64(0), "%s: 
wrong occupied cpu (remove)", tc)
-               }
+       var allocRequest *si.AllocationRequest
+       apiProvider.MockSchedulerAPIUpdateAllocationFn(func(request *si.AllocationRequest) error {
+               allocRequest = request
                return nil
-       }
-
-       apiProvider.MockSchedulerAPIUpdateNodeFn(func(request *si.NodeRequest) error {
-               for _, node := range request.Nodes {
-                       if node.Action == si.NodeInfo_CREATE_DRAIN {
-                               dispatcher.Dispatch(CachedSchedulerNodeEvent{
-                                       NodeID: node.NodeID,
-                                       Event:  NodeAccepted,
-                               })
-                       }
-               }
-               return validatorFunc(request)
        })
 
-       host1 := nodeForTest(Host1, "10G", "10")
-       context.updateNode(nil, host1)
-
-       // add existing pod
+       // add existing foreign pod
        pod1 := foreignPod(podName1, "1G", "500m")
        pod1.Status.Phase = v1.PodRunning
        pod1.Spec.NodeName = Host1
-
-       // validate deletion of existing assigned pod
-       tc = "delete-pod1-pre"
-       executed = false
-       expectAdd = true
-       expectRemove = false
        context.AddPod(pod1)
-       tc = "delete-pod1"
-       executed = false
-       expectAdd = false
-       expectRemove = true
+       allocRequest = nil
        context.DeletePod(pod1)
-       assert.Assert(t, executed, "update not executed")
-       pod := context.schedulerCache.GetPod(string(pod1.UID))
-       assert.Assert(t, pod == nil, "deleted pod found in cache")
 
-       // validate delete when not already found
-       tc = "delete-pod1-again"
-       executed = false
-       expectAdd = false
-       expectRemove = false
-       context.DeletePod(pod1)
-       assert.Assert(t, !executed, "unexpected update")
-       pod = context.schedulerCache.GetPod(string(pod1.UID))
+       assert.Assert(t, allocRequest != nil, "update not executed")
+       assert.Equal(t, 0, len(allocRequest.Allocations))
+       assert.Assert(t, allocRequest.Releases != nil)
+       assert.Equal(t, 1, len(allocRequest.Releases.AllocationsToRelease))
+       assert.Equal(t, podName1, allocRequest.Releases.AllocationsToRelease[0].AllocationKey)
+       assert.Equal(t, constants.DefaultPartition, allocRequest.Releases.AllocationsToRelease[0].PartitionName)
+       assert.Equal(t, "", allocRequest.Releases.AllocationsToRelease[0].ApplicationID)
+       assert.Equal(t, si.TerminationType_STOPPED_BY_RM, allocRequest.Releases.AllocationsToRelease[0].TerminationType)
+       pod := context.schedulerCache.GetPod(string(pod1.UID))
        assert.Assert(t, pod == nil, "deleted pod found in cache")
 }
 
@@ -2011,16 +1921,6 @@ func TestInitializeState(t *testing.T) {
        assert.Equal(t, *pc.PreemptionPolicy, policy, "wrong preemption policy")
        assert.Equal(t, pc.Annotations[constants.AnnotationAllowPreemption], constants.True, "wrong allow-preemption value")
 
-       // verify occupied / capacity on node
-       capacity, occupied, ok := context.schedulerCache.SnapshotResources(nodeName1)
-       assert.Assert(t, ok, "Unable to retrieve node resources")
-       expectedCapacity := common.ParseResource("4", "10G")
-       assert.Equal(t, expectedCapacity.Resources["vcore"].Value, capacity.Resources["vcore"].Value, "wrong capacity vcore")
-       assert.Equal(t, expectedCapacity.Resources["memory"].Value, capacity.Resources["memory"].Value, "wrong capacity memory")
-       expectedOccupied := common.ParseResource("1500m", "2G")
-       assert.Equal(t, expectedOccupied.Resources["vcore"].Value, occupied.Resources["vcore"].Value, "wrong occupied vcore")
-       assert.Equal(t, expectedOccupied.Resources["memory"].Value, occupied.Resources["memory"].Value, "wrong occupied memory")
-
        // check that pod orphan status is correct
        assert.Check(t, !context.schedulerCache.IsPodOrphaned(podName1), "pod1 should not be orphaned")
        assert.Check(t, !context.schedulerCache.IsPodOrphaned(podName2), "pod2 should not be orphaned")
diff --git a/pkg/cache/external/scheduler_cache.go b/pkg/cache/external/scheduler_cache.go
index a7ef737f..c83357c4 100644
--- a/pkg/cache/external/scheduler_cache.go
+++ b/pkg/cache/external/scheduler_cache.go
@@ -266,7 +266,6 @@ func (cache *SchedulerCache) removeNode(node *v1.Node) (*v1.Node, []*v1.Pod) {
 func (cache *SchedulerCache) SnapshotResources(nodeName string) (capacity *si.Resource, occupied *si.Resource, ok bool) {
        cache.lock.RLock()
        defer cache.lock.RUnlock()
-
        occupied, ok1 := cache.nodeOccupied[nodeName]
        capacity, ok2 := cache.nodeCapacity[nodeName]
        if !ok1 || !ok2 {
@@ -275,11 +274,9 @@ func (cache *SchedulerCache) SnapshotResources(nodeName string) (capacity *si.Re
        }
        return capacity, occupied, true
 }
-
 func (cache *SchedulerCache) UpdateCapacity(nodeName string, resource *si.Resource) (capacity *si.Resource, occupied *si.Resource, ok bool) {
        cache.lock.Lock()
        defer cache.lock.Unlock()
-
        occupied, ok1 := cache.nodeOccupied[nodeName]
        _, ok2 := cache.nodeCapacity[nodeName]
        if !ok1 || !ok2 {
@@ -289,11 +286,9 @@ func (cache *SchedulerCache) UpdateCapacity(nodeName string, resource *si.Resour
        cache.nodeCapacity[nodeName] = resource
        return resource, occupied, true
 }
-
 func (cache *SchedulerCache) UpdateOccupiedResource(nodeName string, namespace string, podName string, resource *si.Resource, opt UpdateType) (node *v1.Node, capacity *si.Resource, occupied *si.Resource, ok bool) {
        cache.lock.Lock()
        defer cache.lock.Unlock()
-
        nodeInfo, ok1 := cache.nodesMap[nodeName]
        occupied, ok2 := cache.nodeOccupied[nodeName]
        capacity, ok3 := cache.nodeCapacity[nodeName]
@@ -305,7 +300,6 @@ func (cache *SchedulerCache) UpdateOccupiedResource(nodeName string, namespace s
                return nil, nil, nil, false
        }
        node = nodeInfo.Node()
-
        switch opt {
        case AddOccupiedResource:
                log.Log(log.ShimCacheExternal).Info("Adding occupied resources to node",
diff --git a/pkg/cache/external/scheduler_cache_test.go b/pkg/cache/external/scheduler_cache_test.go
index c443491b..1b13ef7f 100644
--- a/pkg/cache/external/scheduler_cache_test.go
+++ b/pkg/cache/external/scheduler_cache_test.go
@@ -1116,43 +1116,36 @@ func TestNodeResources(t *testing.T) {
                },
        }
        cache.UpdateNode(node)
-
        // test snapshot with missing node
        capacity, occupied, ok := cache.SnapshotResources("missing")
        assert.Assert(t, !ok, "got result for missing host")
        assert.Assert(t, capacity == nil, "got capacity for missing host")
        assert.Assert(t, occupied == nil, "got occupied for missing host")
-
        // test snapshot with existing, unoccupied node
        capacity, occupied, ok = cache.SnapshotResources(host1)
        assert.Assert(t, ok, "no result for host1")
        assert.Equal(t, int64(1024*1000*1000), capacity.Resources["memory"].Value, "wrong memory capacity for host1")
        assert.Equal(t, int64(10*1000), capacity.Resources["vcore"].Value, "wrong vcore capacity for host1")
        assert.Equal(t, 0, len(occupied.Resources), "non-empty occupied resources")
-
        res1 := common.NewResourceBuilder().AddResource("memory", 2048*1000*1000).AddResource("vcore", 20000).Build()
        res2 := common.NewResourceBuilder().AddResource("memory", 512*1000*1000).AddResource("vcore", 5000).Build()
-
        // update capacity with missing node
        capacity, occupied, ok = cache.UpdateCapacity("missing", res1)
        assert.Assert(t, !ok, "got result for missing host")
        assert.Assert(t, capacity == nil, "got capacity for missing host")
        assert.Assert(t, occupied == nil, "got occupied for missing host")
-
        // update capacity with real node
        capacity, occupied, ok = cache.UpdateCapacity(host1, res1)
        assert.Assert(t, ok, "no result for host1")
        assert.Equal(t, int64(2048*1000*1000), capacity.Resources["memory"].Value, "wrong memory capacity for host1")
        assert.Equal(t, int64(20*1000), capacity.Resources["vcore"].Value, "wrong vcore capacity for host1")
        assert.Equal(t, 0, len(occupied.Resources), "non-empty occupied resources")
-
        // update occupied resources with missing node
        node, capacity, occupied, ok = cache.UpdateOccupiedResource("missing", "default", "podName", res2, AddOccupiedResource)
        assert.Assert(t, !ok, "got result for missing host")
        assert.Assert(t, node == nil, "got node for missing host")
        assert.Assert(t, capacity == nil, "got capacity for missing host")
        assert.Assert(t, occupied == nil, "got occupied for missing host")
-
        // update occupied resources with real node
        node, capacity, occupied, ok = cache.UpdateOccupiedResource(host1, "default", "podName", res2, AddOccupiedResource)
        assert.Assert(t, ok, "no result for host1")
@@ -1161,7 +1154,6 @@ func TestNodeResources(t *testing.T) {
        assert.Equal(t, int64(20*1000), capacity.Resources["vcore"].Value, "wrong vcore capacity for host1")
        assert.Equal(t, int64(512*1000*1000), occupied.Resources["memory"].Value, "wrong memory occupied for host1")
        assert.Equal(t, int64(5*1000), occupied.Resources["vcore"].Value, "wrong vcore occupied for host1")
-
        // retrieve snapshot again
        capacity, occupied, ok = cache.SnapshotResources(host1)
        assert.Assert(t, ok, "no result for host1")
@@ -1170,7 +1162,6 @@ func TestNodeResources(t *testing.T) {
        assert.Equal(t, int64(20*1000), capacity.Resources["vcore"].Value, "wrong vcore capacity for host1")
        assert.Equal(t, int64(512*1000*1000), occupied.Resources["memory"].Value, "wrong memory occupied for host1")
        assert.Equal(t, int64(5*1000), occupied.Resources["vcore"].Value, "wrong vcore occupied for host1")
-
        // subtract occupied resources with real node
        node, capacity, occupied, ok = cache.UpdateOccupiedResource(host1, "default", "podName", res2, SubOccupiedResource)
        assert.Assert(t, ok, "no result for host1")
diff --git a/pkg/common/constants/constants.go b/pkg/common/constants/constants.go
index 5a848b4a..09d9ea13 100644
--- a/pkg/common/constants/constants.go
+++ b/pkg/common/constants/constants.go
@@ -66,6 +66,7 @@ const SchedulerName = "yunikorn"
 
 // OwnerReferences
 const DaemonSetType = "DaemonSet"
+const NodeKind = "Node"
 
 // Gang scheduling
 const PlaceholderContainerImage = "registry.k8s.io/pause:3.7"
diff --git a/pkg/common/si_helper.go b/pkg/common/si_helper.go
index 82a22530..52711df0 100644
--- a/pkg/common/si_helper.go
+++ b/pkg/common/si_helper.go
@@ -114,6 +114,34 @@ func CreateAllocationForTask(appID, taskID, nodeID string, resource *si.Resource
        }
 }
 
+func CreateAllocationForForeignPod(pod *v1.Pod) *si.AllocationRequest {
+       podType := common.AllocTypeDefault
+       for _, ref := range pod.OwnerReferences {
+               if ref.Kind == constants.NodeKind {
+                       podType = common.AllocTypeStatic
+                       break
+               }
+       }
+
+       allocation := si.Allocation{
+               AllocationTags: map[string]string{
+                       common.Foreign: podType,
+               },
+               AllocationKey:    string(pod.UID),
+               ResourcePerAlloc: GetPodResource(pod),
+               Priority:         CreatePriorityForTask(pod),
+               NodeID:           pod.Spec.NodeName,
+       }
+
+       // add creation time for ask
+       allocation.AllocationTags[common.CreationTime] = strconv.FormatInt(pod.CreationTimestamp.Unix(), 10)
+
+       return &si.AllocationRequest{
+               Allocations: []*si.Allocation{&allocation},
+               RmID:        conf.GetSchedulerConf().ClusterID,
+       }
+}
+
 func GetTerminationTypeFromString(terminationTypeStr string) si.TerminationType {
        if v, ok := si.TerminationType_value[terminationTypeStr]; ok {
                return si.TerminationType(v)
@@ -141,6 +169,25 @@ func CreateReleaseRequestForTask(appID, taskID, partition, terminationType strin
        }
 }
 
+func CreateReleaseRequestForForeignPod(uid, partition string) *si.AllocationRequest {
+       allocToRelease := make([]*si.AllocationRelease, 1)
+       allocToRelease[0] = &si.AllocationRelease{
+               AllocationKey:   uid,
+               PartitionName:   partition,
+               TerminationType: si.TerminationType_STOPPED_BY_RM,
+               Message:         "pod terminated",
+       }
+
+       releaseRequest := si.AllocationReleasesRequest{
+               AllocationsToRelease: allocToRelease,
+       }
+
+       return &si.AllocationRequest{
+               Releases: &releaseRequest,
+               RmID:     conf.GetSchedulerConf().ClusterID,
+       }
+}
+
 // CreateUpdateRequestForUpdatedNode builds a NodeRequest for capacity and occupied resource updates
 func CreateUpdateRequestForUpdatedNode(nodeID string, capacity *si.Resource, occupied *si.Resource) *si.NodeRequest {
        nodeInfo := &si.NodeInfo{
diff --git a/pkg/shim/scheduler_mock_test.go b/pkg/shim/scheduler_mock_test.go
index b67746d7..66976619 100644
--- a/pkg/shim/scheduler_mock_test.go
+++ b/pkg/shim/scheduler_mock_test.go
@@ -342,7 +342,6 @@ func createUpdateRequestForNewNode(nodeID string, nodeLabels map[string]string,
        nodeInfo := &si.NodeInfo{
                NodeID:              nodeID,
                SchedulableResource: capacity,
-               OccupiedResource:    occupied,
                Attributes: map[string]string{
                        constants.DefaultNodeAttributeHostNameKey: nodeID,
                        constants.DefaultNodeAttributeRackNameKey: constants.DefaultRackName,

