chenyulin0719 commented on code in PR #762:
URL: https://github.com/apache/yunikorn-core/pull/762#discussion_r1443138384
##########
pkg/webservice/handlers_test.go:
##########
@@ -815,6 +824,131 @@ func addAndConfirmApplicationExists(t *testing.T,
partitionName string, partitio
return app
}
+func TestGetPartitionNodesUtilJSON(t *testing.T) {
+ // setup
+ partition := setup(t, configDefault, 1)
+ appID := "app1"
+ node1ID := "node-1"
+ node2ID := "node-2"
+ node3ID := "node-3"
+
+ // create test nodes
+ node1 := addNode(t, partition, node1ID,
resources.NewResourceFromMap(map[string]resources.Quantity{siCommon.Memory:
1000, siCommon.CPU: 1000}))
+ node2 := addNode(t, partition, node2ID,
resources.NewResourceFromMap(map[string]resources.Quantity{siCommon.Memory:
1000, siCommon.CPU: 1000, "GPU": 10}))
+ addNode(t, partition, node3ID,
resources.NewResourceFromMap(map[string]resources.Quantity{siCommon.CPU: 1000}))
+
+ // create test allocations
+ addAllocatedResource(t, node1, "alloc-1", appID,
map[string]resources.Quantity{siCommon.Memory: 500, siCommon.CPU: 300})
+ addAllocatedResource(t, node2, "alloc-2", appID,
map[string]resources.Quantity{siCommon.Memory: 300, siCommon.CPU: 500, "GPU":
5})
+
+ // assert partition nodes utilization
+ result := getPartitionNodesUtilJSON(partition)
+ assert.Equal(t, result.ClusterId, rmID)
+ assert.Equal(t, result.Partition, "default")
+ assert.Equal(t, len(result.NodesUtilList), 3, "Should have 3 resource
types(CPU/Memory/GPU) in the list.")
+
+ // two nodes advertise memory: must show up in the list
+ memoryNodesUtil := getNodesUtilByType(t, result.NodesUtilList,
siCommon.Memory)
+ assert.Equal(t, memoryNodesUtil.NodesUtil[2].NumOfNodes, int64(1))
+ assert.Equal(t, memoryNodesUtil.NodesUtil[4].NumOfNodes, int64(1))
+ assert.Equal(t, memoryNodesUtil.NodesUtil[2].NodeNames[0], node2ID)
+ assert.Equal(t, memoryNodesUtil.NodesUtil[4].NodeNames[0], node1ID)
+
+ // three nodes advertise cpu: must show up in the list
+ cpuNodesUtil := getNodesUtilByType(t, result.NodesUtilList,
siCommon.CPU)
+ assert.Equal(t, cpuNodesUtil.NodesUtil[0].NumOfNodes, int64(1))
+ assert.Equal(t, cpuNodesUtil.NodesUtil[0].NodeNames[0], node3ID)
+ assert.Equal(t, cpuNodesUtil.NodesUtil[2].NumOfNodes, int64(1))
+ assert.Equal(t, cpuNodesUtil.NodesUtil[2].NodeNames[0], node1ID)
+ assert.Equal(t, cpuNodesUtil.NodesUtil[4].NumOfNodes, int64(1))
+ assert.Equal(t, cpuNodesUtil.NodesUtil[4].NodeNames[0], node2ID)
+
+ // one node advertise GPU: must show up in the list
+ gpuNodesUtil := getNodesUtilByType(t, result.NodesUtilList, "GPU")
+ assert.Equal(t, gpuNodesUtil.NodesUtil[4].NumOfNodes, int64(1))
+ assert.Equal(t, gpuNodesUtil.NodesUtil[4].NodeNames[0], node2ID)
+}
+
+func TestGetNodeUtilisations(t *testing.T) {
+ // setup
+ NewWebApp(&scheduler.ClusterContext{}, nil)
+ req, err := http.NewRequest("GET",
"/ws/v1/scheduler/node-utilizations", strings.NewReader(""))
+ assert.NilError(t, err, "Get node utilisations Handler request failed")
+ resp := &MockResponseWriter{}
+
+ getNodeUtilisations(resp, req)
+ var partitionNodesUtilDAOInfo []*dao.PartitionNodesUtilDAOInfo
+ err = json.Unmarshal(resp.outputBytes, &partitionNodesUtilDAOInfo)
+ assert.NilError(t, err, "should decode a empty list of
*dao.PartitionNodesUtilDAOInfo")
+ assert.Equal(t, len(partitionNodesUtilDAOInfo), 0)
+
+ // setup partitions
+ schedulerContext, err = scheduler.NewClusterContext(rmID, policyGroup,
[]byte(configMultiPartitions))
+ assert.NilError(t, err, "Error when load clusterInfo from config")
+ schedulerContext.GetPartition("default")
+ defaultPartition :=
schedulerContext.GetPartition(common.GetNormalizedPartitionName("default",
rmID))
+ gpuPartition :=
schedulerContext.GetPartition(common.GetNormalizedPartitionName("gpu", rmID))
+
+ // add nodes to partitions
+ node1 := addNode(t, defaultPartition, "node-1",
resources.NewResourceFromMap(map[string]resources.Quantity{"memory": 10}))
+ node2 := addNode(t, defaultPartition, "node-2",
resources.NewResourceFromMap(map[string]resources.Quantity{"memory": 10,
"vcore": 5}))
+ node3 := addNode(t, defaultPartition, "node-3",
resources.NewResourceFromMap(map[string]resources.Quantity{"memory": 20,
"vcore": 15}))
+ node4 := addNode(t, gpuPartition, "node-4",
resources.NewResourceFromMap(map[string]resources.Quantity{"gpu": 10}))
+ // add allocatedResource to nodes
+ addAllocatedResource(t, node1, "alloc-1", "app-1",
map[string]resources.Quantity{"memory": 1})
+ addAllocatedResource(t, node2, "alloc-1", "app-1",
map[string]resources.Quantity{"memory": 1, "vcore": 1})
+ addAllocatedResource(t, node3, "alloc-1", "app-1",
map[string]resources.Quantity{"memory": 1, "vcore": 1})
+ addAllocatedResource(t, node4, "alloc-1", "app-1",
map[string]resources.Quantity{"gpu": 1})
+
+ // get nodes utilizations
+ getNodeUtilisations(resp, req)
+ err = json.Unmarshal(resp.outputBytes, &partitionNodesUtilDAOInfo)
+ assert.NilError(t, err, "should decode a list of
*dao.PartitionNodesUtilDAOInfo")
+ assert.Equal(t, len(partitionNodesUtilDAOInfo), 2)
+ assert.Equal(t, partitionNodesUtilDAOInfo[0].ClusterId, rmID)
Review Comment:
Sorry for causing confusion. I just added a check for the second element:
8440789aeb44ac0bccf81f57afad5e6d8015b2af
My original thought was that the rmID value is merely a pass-in value and
there is no difference between the two elements, so I just did a simple check.
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]