Repository: mesos
Updated Branches:
  refs/heads/1.3.x 230a08c73 -> 904729ed3


Added sorter test for allocation queries about inactive clients.

This behavior has not changed, but was not covered by the existing
tests.

Review: https://reviews.apache.org/r/59481/


Project: http://git-wip-us.apache.org/repos/asf/mesos/repo
Commit: http://git-wip-us.apache.org/repos/asf/mesos/commit/7a8cd22b
Tree: http://git-wip-us.apache.org/repos/asf/mesos/tree/7a8cd22b
Diff: http://git-wip-us.apache.org/repos/asf/mesos/diff/7a8cd22b

Branch: refs/heads/1.3.x
Commit: 7a8cd22b1df566a0213483a77aeb626266618ff2
Parents: 230a08c
Author: Neil Conway <neil.con...@gmail.com>
Authored: Tue May 23 10:36:12 2017 -0700
Committer: Neil Conway <neil.con...@gmail.com>
Committed: Wed May 24 14:46:49 2017 -0700

----------------------------------------------------------------------
 src/tests/sorter_tests.cpp | 37 +++++++++++++++++++++++++++++++++++++
 1 file changed, 37 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/mesos/blob/7a8cd22b/src/tests/sorter_tests.cpp
----------------------------------------------------------------------
diff --git a/src/tests/sorter_tests.cpp b/src/tests/sorter_tests.cpp
index 8e7ff79..d8d6140 100644
--- a/src/tests/sorter_tests.cpp
+++ b/src/tests/sorter_tests.cpp
@@ -989,6 +989,43 @@ TEST(SorterTest, UpdateAllocationNestedClient)
 }
 
 
+// This test checks that the sorter correctly reports allocation
+// information about inactive clients.
+TEST(SorterTest, AllocationForInactiveClient)
+{
+  DRFSorter sorter;
+
+  SlaveID slaveId;
+  slaveId.set_value("agentId");
+
+  sorter.add(slaveId, Resources::parse("cpus:10;mem:10").get());
+
+  sorter.add("a");
+  sorter.add("b");
+
+  // Leave client "a" inactive.
+  sorter.activate("b");
+
+  sorter.allocated("a", slaveId, Resources::parse("cpus:2;mem:2").get());
+  sorter.allocated("b", slaveId, Resources::parse("cpus:3;mem:3").get());
+
+  hashmap<string, Resources> clientAllocation = sorter.allocation(slaveId);
+  EXPECT_EQ(2u, clientAllocation.size());
+  EXPECT_EQ(Resources::parse("cpus:2;mem:2").get(), clientAllocation.at("a"));
+  EXPECT_EQ(Resources::parse("cpus:3;mem:3").get(), clientAllocation.at("b"));
+
+  hashmap<SlaveID, Resources> agentAllocation1 = sorter.allocation("a");
+  EXPECT_EQ(1u, agentAllocation1.size());
+  EXPECT_EQ(
+      Resources::parse("cpus:2;mem:2").get(), agentAllocation1.at(slaveId));
+
+  hashmap<SlaveID, Resources> agentAllocation2 = sorter.allocation("b");
+  EXPECT_EQ(1u, agentAllocation2.size());
+  EXPECT_EQ(
+      Resources::parse("cpus:3;mem:3").get(), agentAllocation2.at(slaveId));
+}
+
+
 // We aggregate resources from multiple slaves into the sorter.
 // Since non-scalar resources don't aggregate well across slaves,
 // we need to keep track of the SlaveIDs of the resources. This