This is an automated email from the ASF dual-hosted git repository.
bmahler pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/mesos.git
The following commit(s) were added to refs/heads/master by this push:
new 3f753e7 Fixed allocator performance issue in updateAllocation().
3f753e7 is described below
commit 3f753e77a9e00b884000b59df8797e5422da8ccd
Author: Andrei Sekretenko <[email protected]>
AuthorDate: Wed Oct 30 19:45:33 2019 -0400
Fixed allocator performance issue in updateAllocation().
This patch addresses the poor performance of
`HierarchicalAllocatorProcess::updateAllocation()` for agents with
a huge number of non-addable resources when many frameworks are
registered (see MESOS-10015).
The Sorter methods for totals tracking, which modified the `Resources`
of an agent in the Sorter, are replaced with methods that add/remove
the resource quantities of an agent as a whole (which was in fact the
only use case of the old methods). Thus, subtracting/adding the
`Resources` of a whole agent no longer occurs when updating an agent's
resources in a Sorter.
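For illustration, an agent total update now amounts to the following
(a minimal sketch based on the `updateSlaveTotal()` hunk in the diff
below; `sorter`, `slaveId`, `oldTotal` and `total` are the allocator's
own state):

    // Old interface: every update subtracts and re-adds the agent's
    // full `Resources`, which is expensive for large resource objects.
    sorter->remove(slaveId, oldTotal);
    sorter->add(slaveId, total);

    // New interface: only a small map of scalar quantities changes hands.
    sorter->removeSlave(slaveId);
    sorter->addSlave(
        slaveId, ResourceQuantities::fromScalarResources(total.scalars()));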
Further, this patch completely removes the agent resource tracking
logic from the random sorter (which itself makes no use of it) by
implementing cluster totals tracking in the allocator.
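The allocator-side tracking is symmetric, as sketched here from the
`addSlave()`/`removeSlave()` hunks below (`totalScalarQuantities` is
the new allocator member):

    // On agent addition:
    const ResourceQuantities agentScalarQuantities =
        ResourceQuantities::fromScalarResources(total.scalars());
    totalScalarQuantities += agentScalarQuantities;

    // On agent removal:
    CHECK_CONTAINS(totalScalarQuantities, agentScalarQuantities);
    totalScalarQuantities -= agentScalarQuantities;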
Results of `*BENCHMARK_WithReservationParam.UpdateAllocation*`
(for the DRF sorter):
Master:
Agent resources size: 200 (50 frameworks)
Made 20 reserve and unreserve operations in 2.08586secs
Agent resources size: 400 (100 frameworks)
Made 20 reserve and unreserve operations in 13.8449005secs
Agent resources size: 800 (200 frameworks)
Made 20 reserve and unreserve operations in 2.19253121188333mins
Master + this patch:
Agent resources size: 200 (50 frameworks)
Made 20 reserve and unreserve operations in 468.482366ms
Agent resources size: 400 (100 frameworks)
Made 20 reserve and unreserve operations in 925.725947ms
Agent resources size: 800 (200 frameworks)
Made 20 reserve and unreserve operations in 2.110337109secs
...
Agent resources size: 6400 (1600 frameworks)
Made 20 reserve and unreserve operations in 1.50141861756667mins
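(For scale: at 800 resources per agent this is roughly a 60x speedup,
from ~2.2 minutes down to ~2.1 seconds, and the patched allocator
handles 6400 resources in less time than master needs for 800.)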
Review: https://reviews.apache.org/r/71646/
---
src/master/allocator/mesos/hierarchical.cpp | 46 +++-
src/master/allocator/mesos/hierarchical.hpp | 3 +
src/master/allocator/mesos/sorter/drf/sorter.cpp | 74 ++----
src/master/allocator/mesos/sorter/drf/sorter.hpp | 23 +-
.../allocator/mesos/sorter/random/sorter.cpp | 57 -----
.../allocator/mesos/sorter/random/sorter.hpp | 26 +--
src/master/allocator/mesos/sorter/sorter.hpp | 20 +-
src/tests/sorter_tests.cpp | 259 +++++++--------------
8 files changed, 177 insertions(+), 331 deletions(-)
diff --git a/src/master/allocator/mesos/hierarchical.cpp b/src/master/allocator/mesos/hierarchical.cpp
index 21010de..0df319b 100644
--- a/src/master/allocator/mesos/hierarchical.cpp
+++ b/src/master/allocator/mesos/hierarchical.cpp
@@ -902,10 +902,14 @@ void HierarchicalAllocatorProcess::addSlave(
roleTree.trackReservations(total.reserved());
- roleSorter->add(slaveId, total);
+ const ResourceQuantities agentScalarQuantities =
+ ResourceQuantities::fromScalarResources(total.scalars());
+
+ totalScalarQuantities += agentScalarQuantities;
+ roleSorter->addSlave(slaveId, agentScalarQuantities);
foreachvalue (const Owned<Sorter>& sorter, frameworkSorters) {
- sorter->add(slaveId, total);
+ sorter->addSlave(slaveId, agentScalarQuantities);
}
foreachpair (const FrameworkID& frameworkId,
@@ -981,13 +985,19 @@ void HierarchicalAllocatorProcess::removeSlave(
untrackAllocatedResources(slaveId, frameworkId, resources);
}
- roleSorter->remove(slaveId, slave.getTotal());
+ roleSorter->removeSlave(slaveId);
foreachvalue (const Owned<Sorter>& sorter, frameworkSorters) {
- sorter->remove(slaveId, slave.getTotal());
+ sorter->removeSlave(slaveId);
}
roleTree.untrackReservations(slave.getTotal().reserved());
+
+ const ResourceQuantities agentScalarQuantities =
+ ResourceQuantities::fromScalarResources(slave.getTotal().scalars());
+
+ CHECK_CONTAINS(totalScalarQuantities, agentScalarQuantities);
+ totalScalarQuantities -= agentScalarQuantities;
}
slaves.erase(slaveId);
@@ -1965,7 +1975,7 @@ void HierarchicalAllocatorProcess::__generateOffers()
// allocated resources -
// unallocated reservations -
// unallocated revocable resources
- ResourceQuantities availableHeadroom = roleSorter->totalScalarQuantities();
+ ResourceQuantities availableHeadroom = totalScalarQuantities;
// NOTE: The role sorter does not return aggregated allocation
// information whereas `reservationScalarQuantities` does, so
@@ -2738,7 +2748,7 @@ double HierarchicalAllocatorProcess::_resources_offered_or_allocated(
double HierarchicalAllocatorProcess::_resources_total(
const string& resource)
{
- return roleSorter->totalScalarQuantities().get(resource).value();
+ return totalScalarQuantities.get(resource).value();
}
@@ -2845,7 +2855,9 @@ void HierarchicalAllocatorProcess::trackFrameworkUnderRole(
frameworkSorter->initialize(options.fairnessExcludeResourceNames);
foreachvalue (const Slave& slave, slaves) {
- frameworkSorter->add(slave.info.id(), slave.getTotal());
+ frameworkSorter->addSlave(
+ slave.info.id(),
+ ResourceQuantities::fromScalarResources(slave.getTotal().scalars()));
}
}
@@ -2904,13 +2916,23 @@ bool HierarchicalAllocatorProcess::updateSlaveTotal(
roleTree.untrackReservations(oldTotal.reserved());
roleTree.trackReservations(total.reserved());
- // Update the totals in the sorters.
- roleSorter->remove(slaveId, oldTotal);
- roleSorter->add(slaveId, total);
+ // Update the total in the allocator and totals in the sorters.
+ const ResourceQuantities oldAgentScalarQuantities =
+ ResourceQuantities::fromScalarResources(oldTotal.scalars());
+
+ const ResourceQuantities agentScalarQuantities =
+ ResourceQuantities::fromScalarResources(total.scalars());
+
+ CHECK_CONTAINS(totalScalarQuantities, oldAgentScalarQuantities);
+ totalScalarQuantities -= oldAgentScalarQuantities;
+ totalScalarQuantities += agentScalarQuantities;
+
+ roleSorter->removeSlave(slaveId);
+ roleSorter->addSlave(slaveId, agentScalarQuantities);
foreachvalue (const Owned<Sorter>& sorter, frameworkSorters) {
- sorter->remove(slaveId, oldTotal);
- sorter->add(slaveId, total);
+ sorter->removeSlave(slaveId);
+ sorter->addSlave(slaveId, agentScalarQuantities);
}
return true;
diff --git a/src/master/allocator/mesos/hierarchical.hpp b/src/master/allocator/mesos/hierarchical.hpp
index 9d0fbe7..6454cda 100644
--- a/src/master/allocator/mesos/hierarchical.hpp
+++ b/src/master/allocator/mesos/hierarchical.hpp
@@ -757,6 +757,9 @@ protected:
hashmap<SlaveID, Slave> slaves;
+ // Total scalar resource quantities on all agents.
+ ResourceQuantities totalScalarQuantities;
+
RoleTree roleTree;
// A set of agents that are kept as allocation candidates. Events
diff --git a/src/master/allocator/mesos/sorter/drf/sorter.cpp b/src/master/allocator/mesos/sorter/drf/sorter.cpp
index ef79083..a1b0e25 100644
--- a/src/master/allocator/mesos/sorter/drf/sorter.cpp
+++ b/src/master/allocator/mesos/sorter/drf/sorter.cpp
@@ -445,68 +445,36 @@ Resources DRFSorter::allocation(
}
-const ResourceQuantities& DRFSorter::totalScalarQuantities() const
+void DRFSorter::addSlave(
+ const SlaveID& slaveId,
+ const ResourceQuantities& scalarQuantities)
{
- return total_.totals;
-}
+ bool inserted = total_.agentResourceQuantities.emplace(
+ slaveId, scalarQuantities).second;
+ CHECK(inserted) << "Attempted to add already added agent " << slaveId;
-void DRFSorter::add(const SlaveID& slaveId, const Resources& resources)
-{
- if (!resources.empty()) {
- // Add shared resources to the total quantities when the same
- // resources don't already exist in the total.
- const Resources newShared = resources.shared()
- .filter([this, slaveId](const Resource& resource) {
- return !total_.resources[slaveId].contains(resource);
- });
-
- total_.resources[slaveId] += resources;
-
- const ResourceQuantities scalarQuantities =
- ResourceQuantities::fromScalarResources(
- (resources.nonShared() + newShared).scalars());
-
- total_.totals += scalarQuantities;
-
- // We have to recalculate all shares when the total resources
- // change, but we put it off until `sort` is called so that if
- // something else changes before the next allocation we don't
- // recalculate everything twice.
- dirty = true;
- }
+ total_.totals += scalarQuantities;
+
+ // We have to recalculate all shares when the total resources
+ // change, but we put it off until `sort` is called so that if
+ // something else changes before the next allocation we don't
+ // recalculate everything twice.
+ dirty = true;
}
-void DRFSorter::remove(const SlaveID& slaveId, const Resources& resources)
+void DRFSorter::removeSlave(const SlaveID& slaveId)
{
- if (!resources.empty()) {
- CHECK(total_.resources.contains(slaveId));
- CHECK(total_.resources[slaveId].contains(resources))
- << total_.resources[slaveId] << " does not contain " << resources;
-
- total_.resources[slaveId] -= resources;
+ const auto agent = total_.agentResourceQuantities.find(slaveId);
+ CHECK(agent != total_.agentResourceQuantities.end())
+ << "Attempted to remove unknown agent " << slaveId;
- // Remove shared resources from the total quantities when there
- // are no instances of same resources left in the total.
- const Resources absentShared = resources.shared()
- .filter([this, slaveId](const Resource& resource) {
- return !total_.resources[slaveId].contains(resource);
- });
+ CHECK_CONTAINS(total_.totals, agent->second);
+ total_.totals -= agent->second;
- const ResourceQuantities scalarQuantities =
- ResourceQuantities::fromScalarResources(
- (resources.nonShared() + absentShared).scalars());
-
- CHECK(total_.totals.contains(scalarQuantities));
- total_.totals -= scalarQuantities;
-
- if (total_.resources[slaveId].empty()) {
- total_.resources.erase(slaveId);
- }
-
- dirty = true;
- }
+ total_.agentResourceQuantities.erase(agent);
+ dirty = true;
}
diff --git a/src/master/allocator/mesos/sorter/drf/sorter.hpp b/src/master/allocator/mesos/sorter/drf/sorter.hpp
index 3f6c741..03a6be0 100644
--- a/src/master/allocator/mesos/sorter/drf/sorter.hpp
+++ b/src/master/allocator/mesos/sorter/drf/sorter.hpp
@@ -94,11 +94,11 @@ public:
const std::string& clientPath,
const SlaveID& slaveId) const override;
- const ResourceQuantities& totalScalarQuantities() const override;
-
- void add(const SlaveID& slaveId, const Resources& resources) override;
+ void addSlave(
+ const SlaveID& slaveId,
+ const ResourceQuantities& scalarQuantities) override;
- void remove(const SlaveID& slaveId, const Resources& resources) override;
+ void removeSlave(const SlaveID& slaveId) override;
std::vector<std::string> sort() override;
@@ -148,18 +148,19 @@ private:
// Total resources.
struct Total
{
- // We need to keep track of the resources (and not just scalar
- // quantities) to account for multiple copies of the same shared
- // resources. We need to ensure that we do not update the scalar
- // quantities for shared resources when the change is only in the
- // number of copies in the sorter.
- hashmap<SlaveID, Resources> resources;
-
// We keep the aggregated scalar resource quantities to speed
// up share calculation. Note, resources shared count are ignored.
// Because sharedness inherently refers to the identities of resources
// and not quantities.
ResourceQuantities totals;
+
+ // We keep track of per-agent resource quantities to handle agent removal.
+ //
+ // Note that the only way to change the stored resource quantities
+ // is to remove the agent from the sorter and add it with new resources.
+ // Thus, when a resource shared count on an agent changes, multiple copies
+ // of the same shared resource are still accounted for exactly once.
+ hashmap<SlaveID, const ResourceQuantities> agentResourceQuantities;
} total_;
// Metrics are optionally exposed by the sorter.
diff --git a/src/master/allocator/mesos/sorter/random/sorter.cpp b/src/master/allocator/mesos/sorter/random/sorter.cpp
index 86aeb1b..8c5d4e8 100644
--- a/src/master/allocator/mesos/sorter/random/sorter.cpp
+++ b/src/master/allocator/mesos/sorter/random/sorter.cpp
@@ -381,63 +381,6 @@ Resources RandomSorter::allocation(
}
-const ResourceQuantities& RandomSorter::totalScalarQuantities() const
-{
- return total_.totals;
-}
-
-
-void RandomSorter::add(const SlaveID& slaveId, const Resources& resources)
-{
- if (!resources.empty()) {
- // Add shared resources to the total quantities when the same
- // resources don't already exist in the total.
- const Resources newShared = resources.shared()
- .filter([this, slaveId](const Resource& resource) {
- return !total_.resources[slaveId].contains(resource);
- });
-
- total_.resources[slaveId] += resources;
-
- const ResourceQuantities scalarQuantities =
- ResourceQuantities::fromScalarResources(
- (resources.nonShared() + newShared).scalars());
-
- total_.totals += scalarQuantities;
- }
-}
-
-
-void RandomSorter::remove(const SlaveID& slaveId, const Resources& resources)
-{
- if (!resources.empty()) {
- CHECK(total_.resources.contains(slaveId));
- CHECK(total_.resources[slaveId].contains(resources))
- << total_.resources[slaveId] << " does not contain " << resources;
-
- total_.resources[slaveId] -= resources;
-
- // Remove shared resources from the total quantities when there
- // are no instances of same resources left in the total.
- const Resources absentShared = resources.shared()
- .filter([this, slaveId](const Resource& resource) {
- return !total_.resources[slaveId].contains(resource);
- });
-
- const ResourceQuantities scalarQuantities =
- ResourceQuantities::fromScalarResources(
- (resources.nonShared() + absentShared).scalars());
-
- CHECK(total_.totals.contains(scalarQuantities));
- total_.totals -= scalarQuantities;
-
- if (total_.resources[slaveId].empty()) {
- total_.resources.erase(slaveId);
- }
- }
-}
-
-
vector<string> RandomSorter::sort()
{
pair<vector<string>, vector<double>> clientsAndWeights =
diff --git a/src/master/allocator/mesos/sorter/random/sorter.hpp b/src/master/allocator/mesos/sorter/random/sorter.hpp
index a3097be..79fe2bf 100644
--- a/src/master/allocator/mesos/sorter/random/sorter.hpp
+++ b/src/master/allocator/mesos/sorter/random/sorter.hpp
@@ -94,11 +94,12 @@ public:
const std::string& clientPath,
const SlaveID& slaveId) const override;
- const ResourceQuantities& totalScalarQuantities() const override;
-
- void add(const SlaveID& slaveId, const Resources& resources) override;
+ // NOTE: addSlave/removeSlave is a no-op for this sorter.
+ void addSlave(
+ const SlaveID& slaveId,
+ const ResourceQuantities& scalarQuantities) override {};
- void remove(const SlaveID& slaveId, const Resources& resources) override;
+ void removeSlave(const SlaveID& slaveId) override {};
// This will perform a weighted random shuffle on each call.
//
@@ -178,23 +179,6 @@ private:
// rooted at that path. This hashmap might include weights for paths
// that are not currently in the sorter tree.
hashmap<std::string, double> weights;
-
- // Total resources.
- struct Total
- {
- // We need to keep track of the resources (and not just scalar
- // quantities) to account for multiple copies of the same shared
- // resources. We need to ensure that we do not update the scalar
- // quantities for shared resources when the change is only in the
- // number of copies in the sorter.
- hashmap<SlaveID, Resources> resources;
-
- // We keep the aggregated scalar resource quantities to speed
- // up share calculation. Note, resources shared count are ignored.
- // Because sharedness inherently refers to the identities of resources
- // and not quantities.
- ResourceQuantities totals;
- } total_;
};
diff --git a/src/master/allocator/mesos/sorter/sorter.hpp b/src/master/allocator/mesos/sorter/sorter.hpp
index 6b6b4a1..f97a546 100644
--- a/src/master/allocator/mesos/sorter/sorter.hpp
+++ b/src/master/allocator/mesos/sorter/sorter.hpp
@@ -120,15 +120,19 @@ public:
const std::string& client,
const SlaveID& slaveId) const = 0;
- // Returns the total scalar resource quantities in this sorter.
- virtual const ResourceQuantities& totalScalarQuantities() const = 0;
-
- // Add resources to the total pool of resources this
- // Sorter should consider.
- virtual void add(const SlaveID& slaveId, const Resources& resources) = 0;
+ // Add/remove total scalar resource quantities of an agent to/from the
+ // total pool of resources this Sorter should consider.
+ //
+ // NOTE: Updating resources of an agent in the Sorter is done by first calling
+ // `removeSlave()` and then `addSlave()` with new resource quantities.
+ //
+ // NOTE: Attempt to add the same agent twice or remove an agent not added
+ // to the Sorter may crash the program.
+ virtual void addSlave(
+ const SlaveID& slaveId,
+ const ResourceQuantities& scalarQuantities) = 0;
- // Remove resources from the total pool.
- virtual void remove(const SlaveID& slaveId, const Resources& resources) = 0;
+ virtual void removeSlave(const SlaveID& slaveId) = 0;
// Returns all of the clients in the order that they should
// be allocated to, according to this Sorter's policy.
diff --git a/src/tests/sorter_tests.cpp b/src/tests/sorter_tests.cpp
index d7fdee8..472691d 100644
--- a/src/tests/sorter_tests.cpp
+++ b/src/tests/sorter_tests.cpp
@@ -243,8 +243,7 @@ TEST(DRFSorterTest, DRF)
SlaveID slaveId;
slaveId.set_value("agentId");
- Resources totalResources = Resources::parse("cpus:100;mem:100").get();
- sorter.add(slaveId, totalResources);
+ sorter.addSlave(slaveId, *ResourceQuantities::fromString("cpus:100;mem:100"));
EXPECT_EQ(vector<string>({}), sorter.sort());
@@ -287,16 +286,14 @@ TEST(DRFSorterTest, DRF)
sorter.activate("e");
sorter.allocated("e", slaveId, eResources);
- Resources removedResources = Resources::parse("cpus:50;mem:0").get();
- sorter.remove(slaveId, removedResources);
- // total resources is now cpus = 50, mem = 100
+ sorter.removeSlave(slaveId);
+ sorter.addSlave(slaveId, *ResourceQuantities::fromString("cpus:50;mem:100"));
// shares: b = .04, c = .02, d = .06, e = .05
EXPECT_EQ(vector<string>({"c", "b", "e", "d"}), sorter.sort());
- Resources addedResources = Resources::parse("cpus:0;mem:100").get();
- sorter.add(slaveId, addedResources);
- // total resources is now cpus = 50, mem = 200
+ sorter.removeSlave(slaveId);
+ sorter.addSlave(slaveId, *ResourceQuantities::fromString("cpus:50;mem:200"));
Resources fResources = Resources::parse("cpus:5;mem:1").get();
sorter.add("f");
@@ -336,7 +333,7 @@ TEST(DRFSorterTest, WDRF)
SlaveID slaveId;
slaveId.set_value("agentId");
- sorter.add(slaveId, Resources::parse("cpus:100;mem:100").get());
+ sorter.addSlave(slaveId, *ResourceQuantities::fromString("cpus:100;mem:100"));
sorter.add("a");
sorter.activate("a");
@@ -396,9 +393,7 @@ TEST(DRFSorterTest, UpdateWeight)
SlaveID slaveId;
slaveId.set_value("agentId");
- Resources totalResources = Resources::parse("cpus:100;mem:100").get();
-
- sorter.add(slaveId, totalResources);
+ sorter.addSlave(slaveId, *ResourceQuantities::fromString("cpus:100;mem:100"));
sorter.add("a");
sorter.activate("a");
@@ -428,9 +423,7 @@ TEST(DRFSorterTest, AllocationCountTieBreak)
SlaveID slaveId;
slaveId.set_value("agentId");
- Resources totalResources = Resources::parse("cpus:100;mem:100").get();
-
- sorter.add(slaveId, totalResources);
+ sorter.addSlave(slaveId, *ResourceQuantities::fromString("cpus:100;mem:100"));
sorter.add("a");
sorter.add("b");
@@ -511,8 +504,7 @@ TEST(DRFSorterTest, ShallowHierarchy)
SlaveID slaveId;
slaveId.set_value("agentId");
- Resources totalResources = Resources::parse("cpus:100;mem:100").get();
- sorter.add(slaveId, totalResources);
+ sorter.addSlave(slaveId, *ResourceQuantities::fromString("cpus:100;mem:100"));
sorter.add("a/a");
sorter.activate("a/a");
@@ -554,16 +546,14 @@ TEST(DRFSorterTest, ShallowHierarchy)
sorter.activate("e/e");
sorter.allocated("e/e", slaveId, eResources);
- Resources removedResources = Resources::parse("cpus:50;mem:0").get();
- sorter.remove(slaveId, removedResources);
- // total resources is now cpus = 50, mem = 100
+ sorter.removeSlave(slaveId);
+ sorter.addSlave(slaveId, *ResourceQuantities::fromString("cpus:50;mem:100"));
// shares: b/b = .04, c/c = .02, d/d = .06, e/e = .05
EXPECT_EQ(vector<string>({"c/c", "b/b", "e/e", "d/d"}), sorter.sort());
- Resources addedResources = Resources::parse("cpus:0;mem:100").get();
- sorter.add(slaveId, addedResources);
- // total resources is now cpus = 50, mem = 200
+ sorter.removeSlave(slaveId);
+ sorter.addSlave(slaveId, *ResourceQuantities::fromString("cpus:50;mem:200"));
Resources fResources = Resources::parse("cpus:5;mem:1").get();
sorter.add("f/f");
@@ -606,8 +596,7 @@ TEST(DRFSorterTest, DeepHierarchy)
SlaveID slaveId;
slaveId.set_value("agentId");
- Resources totalResources = Resources::parse("cpus:100;mem:100").get();
- sorter.add(slaveId, totalResources);
+ sorter.addSlave(slaveId, *ResourceQuantities::fromString("cpus:100;mem:100"));
sorter.add("a/a/a/a/a");
sorter.activate("a/a/a/a/a");
@@ -649,17 +638,15 @@ TEST(DRFSorterTest, DeepHierarchy)
sorter.activate("e/e/e/e/e/e");
sorter.allocated("e/e/e/e/e/e", slaveId, eResources);
- Resources removedResources = Resources::parse("cpus:50;mem:0").get();
- sorter.remove(slaveId, removedResources);
- // total resources is now cpus = 50, mem = 100
+ sorter.removeSlave(slaveId);
+ sorter.addSlave(slaveId, *ResourceQuantities::fromString("cpus:50;mem:100"));
// shares: b/b/b/b = .04, c/c/c = .02, d/d = .06, e/e/e/e/e/e = .05
EXPECT_EQ(vector<string>({"c/c/c", "b/b/b/b", "e/e/e/e/e/e", "d/d"}),
sorter.sort());
- Resources addedResources = Resources::parse("cpus:0;mem:100").get();
- sorter.add(slaveId, addedResources);
- // total resources is now cpus = 50, mem = 200
+ sorter.removeSlave(slaveId);
+ sorter.addSlave(slaveId, *ResourceQuantities::fromString("cpus:50;mem:200"));
Resources fResources = Resources::parse("cpus:5;mem:1").get();
sorter.add("f/f");
@@ -702,8 +689,7 @@ TEST(DRFSorterTest, HierarchicalAllocation)
SlaveID slaveId;
slaveId.set_value("agentId");
- Resources totalResources = Resources::parse("cpus:100;mem:100").get();
- sorter.add(slaveId, totalResources);
+ sorter.addSlave(slaveId, *ResourceQuantities::fromString("cpus:100;mem:100"));
sorter.add("a");
sorter.add("b/c");
@@ -816,8 +802,7 @@ TEST(DRFSorterTest, HierarchicalIterationOrder)
SlaveID slaveId;
slaveId.set_value("agentId");
- Resources totalResources = Resources::parse("cpus:100;mem:100").get();
- sorter.add(slaveId, totalResources);
+ sorter.addSlave(slaveId, *ResourceQuantities::fromString("cpus:100;mem:100"));
sorter.add("a/b");
sorter.add("c");
@@ -864,7 +849,7 @@ TEST(DRFSorterTest, AddChildToLeaf)
SlaveID slaveId;
slaveId.set_value("agentId");
- sorter.add(slaveId, Resources::parse("cpus:100;mem:100").get());
+ sorter.addSlave(slaveId, *ResourceQuantities::fromString("cpus:100;mem:100"));
sorter.add("a");
sorter.activate("a");
@@ -935,7 +920,7 @@ TEST(DRFSorterTest, AddChildToInternal)
SlaveID slaveId;
slaveId.set_value("agentId");
- sorter.add(slaveId, Resources::parse("cpus:100;mem:100").get());
+ sorter.addSlave(slaveId, *ResourceQuantities::fromString("cpus:100;mem:100"));
sorter.add("x/a");
sorter.activate("x/a");
@@ -978,7 +963,7 @@ TEST(DRFSorterTest, AddChildToInactiveLeaf)
SlaveID slaveId;
slaveId.set_value("agentId");
- sorter.add(slaveId, Resources::parse("cpus:100;mem:100").get());
+ sorter.addSlave(slaveId, *ResourceQuantities::fromString("cpus:100;mem:100"));
sorter.add("a");
sorter.activate("a");
@@ -1013,7 +998,7 @@ TYPED_TEST(CommonSorterTest, RemoveLeafCollapseParent)
SlaveID slaveId;
slaveId.set_value("agentId");
- sorter.add(slaveId, Resources::parse("cpus:100;mem:100").get());
+ sorter.addSlave(slaveId, *ResourceQuantities::fromString("cpus:100;mem:100"));
sorter.add("a");
sorter.activate("a");
@@ -1058,7 +1043,7 @@ TYPED_TEST(CommonSorterTest, RemoveLeafCollapseParentInactive)
SlaveID slaveId;
slaveId.set_value("agentId");
- sorter.add(slaveId, Resources::parse("cpus:100;mem:100").get());
+ sorter.addSlave(slaveId, *ResourceQuantities::fromString("cpus:100;mem:100"));
sorter.add("a");
sorter.activate("a");
@@ -1099,7 +1084,7 @@ TEST(DRFSorterTest, ChangeWeightOnSubtree)
SlaveID slaveId;
slaveId.set_value("agentId");
- sorter.add(slaveId, Resources::parse("cpus:100;mem:100").get());
+ sorter.addSlave(slaveId, *ResourceQuantities::fromString("cpus:100;mem:100"));
sorter.updateWeight("b", 3);
sorter.updateWeight("a", 2);
@@ -1165,9 +1150,9 @@ TEST(DRFSorterTest, SplitResourceShares)
disk2.mutable_disk()->mutable_persistence()->set_id("ID2");
disk2.mutable_disk()->mutable_volume()->set_container_path("data");
- sorter.add(
+ sorter.addSlave(
slaveId,
- Resources::parse("cpus:100;mem:100;disk:95").get() + disk1 + disk2);
+ *ResourceQuantities::fromString("cpus:100;mem:100;disk:100"));
// Now, allocate resources to "a" and "b". Note that "b" will have
// more disk if the shares are accounted correctly!
@@ -1192,7 +1177,8 @@ TYPED_TEST(CommonSorterTest, UpdateAllocation)
sorter.activate("a");
sorter.activate("b");
- sorter.add(slaveId, Resources::parse("cpus:10;mem:10;disk:10").get());
+ sorter.addSlave(
+ slaveId, *ResourceQuantities::fromString("cpus:10;mem:10;disk:10"));
sorter.allocated(
"a", slaveId, Resources::parse("cpus:10;mem:10;disk:10").get());
@@ -1233,7 +1219,7 @@ TYPED_TEST(CommonSorterTest, UpdateAllocation)
sorter.add("c");
sorter.activate("c");
- sorter.add(slaveId, resourcesC);
+ sorter.addSlave(slaveId, ResourceQuantities::fromScalarResources(resourcesC));
sorter.allocated("c", slaveId2, resourcesC);
sorter.update("c", slaveId2, resourcesC, Resources());
@@ -1253,7 +1239,8 @@ TYPED_TEST(CommonSorterTest, UpdateAllocationNestedClient)
sorter.activate("a/x");
sorter.activate("b/y");
- sorter.add(slaveId, Resources::parse("cpus:10;mem:10;disk:10").get());
+ sorter.addSlave(
+ slaveId, *ResourceQuantities::fromString("cpus:10;mem:10;disk:10"));
sorter.allocated(
"a/x", slaveId, Resources::parse("cpus:10;mem:10;disk:10").get());
@@ -1295,7 +1282,7 @@ TYPED_TEST(CommonSorterTest, AllocationForInactiveClient)
SlaveID slaveId;
slaveId.set_value("agentId");
- sorter.add(slaveId, Resources::parse("cpus:10;mem:10").get());
+ sorter.addSlave(slaveId, *ResourceQuantities::fromString("cpus:10;mem:10"));
sorter.add("a");
sorter.add("b");
@@ -1348,11 +1335,14 @@ TYPED_TEST(CommonSorterTest, MultipleSlaves)
sorter.add("framework");
sorter.activate("framework");
- Resources slaveResources =
+ const Resources slaveResources =
Resources::parse("cpus:2;mem:512;ports:[31000-32000]").get();
- sorter.add(slaveA, slaveResources);
- sorter.add(slaveB, slaveResources);
+ const ResourceQuantities slaveResourceQuantities =
+ ResourceQuantities::fromScalarResources(slaveResources.scalars());
+
+ sorter.addSlave(slaveA, slaveResourceQuantities);
+ sorter.addSlave(slaveB, slaveResourceQuantities);
sorter.allocated("framework", slaveA, slaveResources);
sorter.allocated("framework", slaveB, slaveResources);
@@ -1361,14 +1351,10 @@ TYPED_TEST(CommonSorterTest, MultipleSlaves)
EXPECT_EQ(slaveResources, sorter.allocation("framework", slaveA));
EXPECT_EQ(slaveResources, sorter.allocation("framework", slaveB));
- EXPECT_EQ(
- ResourceQuantities::fromScalarResources(slaveResources.scalars()) +
- ResourceQuantities::fromScalarResources(slaveResources.scalars()),
+ EXPECT_EQ(slaveResourceQuantities + slaveResourceQuantities,
sorter.allocationScalarQuantities("framework"));
- EXPECT_EQ(
- ResourceQuantities::fromScalarResources(slaveResources.scalars()) +
- ResourceQuantities::fromScalarResources(slaveResources.scalars()),
+ EXPECT_EQ(slaveResourceQuantities + slaveResourceQuantities,
sorter.allocationScalarQuantities());
}
@@ -1391,11 +1377,14 @@ TYPED_TEST(CommonSorterTest, MultipleSlavesUpdateAllocation)
sorter.add("framework");
sorter.activate("framework");
- Resources slaveResources =
+ const Resources slaveResources =
Resources::parse("cpus:2;mem:512;disk:10;ports:[31000-32000]").get();
- sorter.add(slaveA, slaveResources);
- sorter.add(slaveB, slaveResources);
+ const ResourceQuantities slaveResourceQuantities =
+ ResourceQuantities::fromScalarResources(slaveResources.scalars());
+
+ sorter.addSlave(slaveA, slaveResourceQuantities);
+ sorter.addSlave(slaveB, slaveResourceQuantities);
sorter.allocated("framework", slaveA, slaveResources);
sorter.allocated("framework", slaveB, slaveResources);
@@ -1443,7 +1432,7 @@ TEST(DRFSorterTest, UpdateTotal)
sorter.activate("a");
sorter.activate("b");
- sorter.add(slaveId, Resources::parse("cpus:10;mem:100").get());
+ sorter.addSlave(slaveId, *ResourceQuantities::fromString("cpus:10;mem:100"));
// Dominant share of "a" is 0.2 (cpus).
sorter.allocated(
@@ -1457,8 +1446,8 @@ TEST(DRFSorterTest, UpdateTotal)
// Update the total resources by removing the previous total and
// adding back the new total.
- sorter.remove(slaveId, Resources::parse("cpus:10;mem:100").get());
- sorter.add(slaveId, Resources::parse("cpus:100;mem:10").get());
+ sorter.removeSlave(slaveId);
+ sorter.addSlave(slaveId, *ResourceQuantities::fromString("cpus:100;mem:10"));
// Now the dominant share of "a" is 0.1 (mem) and "b" is 0.2 (mem),
// which should change the sort order.
@@ -1483,8 +1472,8 @@ TEST(DRFSorterTest, MultipleSlavesUpdateTotal)
sorter.activate("a");
sorter.activate("b");
- sorter.add(slaveA, Resources::parse("cpus:5;mem:50").get());
- sorter.add(slaveB, Resources::parse("cpus:5;mem:50").get());
+ sorter.addSlave(slaveA, *ResourceQuantities::fromString("cpus:5;mem:50"));
+ sorter.addSlave(slaveB, *ResourceQuantities::fromString("cpus:5;mem:50"));
// Dominant share of "a" is 0.2 (cpus).
sorter.allocated(
@@ -1496,10 +1485,9 @@ TEST(DRFSorterTest, MultipleSlavesUpdateTotal)
EXPECT_EQ(vector<string>({"b", "a"}), sorter.sort());
- // Update the total resources of slaveA by removing the previous
- // total and adding the new total.
- sorter.remove(slaveA, Resources::parse("cpus:5;mem:50").get());
- sorter.add(slaveA, Resources::parse("cpus:95;mem:50").get());
+ // Update the total resources of slaveA.
+ sorter.removeSlave(slaveA);
+ sorter.addSlave(slaveA, *ResourceQuantities::fromString("cpus:95;mem:50"));
// Now the dominant share of "a" is 0.02 (cpus) and "b" is 0.03
// (mem), which should change the sort order.
@@ -1523,7 +1511,7 @@ TEST(DRFSorterTest, RemoveResources)
sorter.activate("b");
Resources slaveTotal = Resources::parse("cpus", "10", "*").get();
- sorter.add(slaveId, slaveTotal);
+ sorter.addSlave(slaveId, ResourceQuantities::fromScalarResources(slaveTotal));
// Dominant share of "a" is 0.6 (cpus).
Resources allocatedForA = Resources::parse("cpus", "6", "*").get();
@@ -1536,8 +1524,10 @@ TEST(DRFSorterTest, RemoveResources)
// Remove cpus from the total resources as well as the allocation of "a".
Resources removed = Resources::parse("cpus", "5", "*").get();
- sorter.remove(slaveId, slaveTotal);
- sorter.add(slaveId, slaveTotal - removed);
+ sorter.removeSlave(slaveId);
+ sorter.addSlave(
+ slaveId, ResourceQuantities::fromScalarResources(slaveTotal - removed));
+
sorter.update("a", slaveId, allocatedForA, allocatedForA - removed);
// Now the dominant share of "a" is 0.2 (cpus) and that of "b" is 0.8 (cpus),
@@ -1566,7 +1556,7 @@ TEST(DRFSorterTest, RevocableResources)
revocable.mutable_revocable();
Resources total = Resources::parse("cpus:10;mem:100").get() + revocable;
- sorter.add(slaveId, total);
+ sorter.addSlave(slaveId, ResourceQuantities::fromScalarResources(total));
// Dominant share of "a" is 0.1 (cpus).
Resources a = Resources::parse("cpus:2;mem:1").get();
@@ -1596,15 +1586,13 @@ TEST(DRFSorterTest, SharedResources)
SlaveID slaveId;
slaveId.set_value("agentId");
- Resource sharedDisk = createDiskResource(
+ const Resource sharedDisk = createDiskResource(
"100", "role1", "id1", "path1", None(), true);
- // Set totalResources to have disk of 1000, with disk 100 being shared.
- Resources totalResources = Resources::parse(
- "cpus:100;mem:100;disk(role1):900").get();
- totalResources += sharedDisk;
-
- sorter.add(slaveId, totalResources);
+ sorter.addSlave(
+ slaveId,
+ ResourceQuantities::fromScalarResources(
+ *Resources::parse("cpus:100;mem:100;disk(role1):900") + sharedDisk));
// Verify sort() works when shared resources are in the allocations.
sorter.add("a");
@@ -1650,28 +1638,6 @@ TEST(DRFSorterTest, SharedResources)
// d = .1 (dominant: disk).
EXPECT_EQ(vector<string>({"b", "c", "d"}), sorter.sort());
- // Verify other basic allocator methods work when shared resources
- // are in the allocations.
- Resources removedResources = Resources::parse("cpus:50;mem:0").get();
- sorter.remove(slaveId, removedResources);
-
- // Total resources is now:
- // cpus:50;mem:100;disk(role1):900;disk(role1)[id1]:100
-
- // Shares: b = .04 (dominant: cpus), c = .1 (dominant: disk),
- // d = .1 (dominant: disk).
- EXPECT_EQ(vector<string>({"b", "c", "d"}), sorter.sort());
-
- Resources addedResources = Resources::parse("cpus:0;mem:100").get();
- sorter.add(slaveId, addedResources);
-
- // Total resources is now:
- // cpus:50;mem:200;disk(role1):900;disk(role1)[id1]:100
-
- // Shares: b = .04 (dominant: cpus), c = .1 (dominant: disk),
- // d = .1 (dominant: disk).
- EXPECT_EQ(vector<string>({"b", "c", "d"}), sorter.sort());
-
EXPECT_TRUE(sorter.contains("b"));
EXPECT_FALSE(sorter.contains("a"));
@@ -1690,15 +1656,13 @@ TEST(DRFSorterTest, SameDominantSharedResourcesAcrossClients)
SlaveID slaveId;
slaveId.set_value("agentId");
- Resource sharedDisk = createDiskResource(
+ const Resource sharedDisk = createDiskResource(
"900", "role1", "id1", "path1", None(), true);
- // Set totalResources to have disk of 1000, with disk 900 being shared.
- Resources totalResources = Resources::parse(
- "cpus:100;mem:100;disk(role1):100").get();
- totalResources += sharedDisk;
-
- sorter.add(slaveId, totalResources);
+ sorter.addSlave(
+ slaveId,
+ ResourceQuantities::fromScalarResources(
+ *Resources::parse("cpus:100;mem:100;disk(role1):100") + sharedDisk));
// Add 2 clients each with the same shared disk, but with varying
// cpus and mem.
@@ -1739,15 +1703,13 @@ TEST(DRFSorterTest, SameSharedResourcesSameClient)
SlaveID slaveId;
slaveId.set_value("agentId");
- Resource sharedDisk = createDiskResource(
+ const Resource sharedDisk = createDiskResource(
"50", "role1", "id1", "path1", None(), true);
- // Set totalResources to have disk of 1000, with disk of 50 being shared.
- Resources totalResources = Resources::parse(
- "cpus:100;mem:100;disk(role1):950").get();
- totalResources += sharedDisk;
-
- sorter.add(slaveId, totalResources);
+ sorter.addSlave(
+ slaveId,
+ ResourceQuantities::fromScalarResources(
+ *Resources::parse("cpus:100;mem:100;disk(role1):950") + sharedDisk));
// Verify sort() works when shared resources are in the allocations.
sorter.add("a");
@@ -1784,15 +1746,13 @@ TEST(DRFSorterTest, SharedResourcesUnallocated)
SlaveID slaveId;
slaveId.set_value("agentId");
- Resource sharedDisk = createDiskResource(
+ const Resource sharedDisk = createDiskResource(
"100", "role1", "id1", "path1", None(), true);
- // Set totalResources to have disk of 1000, with disk 100 being shared.
- Resources totalResources = Resources::parse(
- "cpus:100;mem:100;disk(role1):900").get();
- totalResources += sharedDisk;
-
- sorter.add(slaveId, totalResources);
+ sorter.addSlave(
+ slaveId,
+ ResourceQuantities::fromScalarResources(
+ *Resources::parse("cpus:100;mem:100;disk(role1):900") + sharedDisk));
// Allocate 3 copies of shared resources to client 'a', but allocate no
// shared resource to client 'b'.
@@ -1828,45 +1788,6 @@ TEST(DRFSorterTest, SharedResourcesUnallocated)
}
-// This test verifies that shared resources are removed from the sorter
-// only when all instances of the the same shared resource are removed.
-TYPED_TEST(CommonSorterTest, RemoveSharedResources)
-{
- TypeParam sorter;
-
- SlaveID slaveId;
- slaveId.set_value("agentId");
-
- Resource sharedDisk = createDiskResource(
- "100", "role1", "id1", "path1", None(), true);
-
- sorter.add(
- slaveId, Resources::parse("cpus:100;mem:100;disk(role1):900").get());
-
- ResourceQuantities quantity1 = sorter.totalScalarQuantities();
-
- sorter.add(slaveId, sharedDisk);
- ResourceQuantities quantity2 = sorter.totalScalarQuantities();
-
- EXPECT_EQ(
- CHECK_NOTERROR(ResourceQuantities::fromString("disk:100")),
- quantity2 - quantity1);
-
- sorter.add(slaveId, sharedDisk);
- ResourceQuantities quantity3 = sorter.totalScalarQuantities();
-
- EXPECT_NE(quantity1, quantity3);
- EXPECT_EQ(quantity2, quantity3);
-
- // The quantity of the shared disk is removed when the last copy is removed.
- sorter.remove(slaveId, sharedDisk);
- EXPECT_EQ(sorter.totalScalarQuantities(), quantity3);
-
- sorter.remove(slaveId, sharedDisk);
- EXPECT_EQ(sorter.totalScalarQuantities(), quantity1);
-}
-
-
// This benchmark simulates sorting a number of clients that have
// different amount of allocations.
//
@@ -1907,8 +1828,8 @@ TYPED_TEST(CommonSorterTest, BENCHMARK_FullSort)
cout << "Added " << clientCount << " clients in "
<< watch.elapsed() << endl;
- Resources agentResources = Resources::parse(
- "cpus:24;mem:4096;disk:4096;ports:[31000-32000]").get();
+ const ResourceQuantities agentScalarQuantities =
+ *ResourceQuantities::fromString("cpus:24;mem:4096;disk:4096");
watch.start();
{
@@ -1918,7 +1839,7 @@ TYPED_TEST(CommonSorterTest, BENCHMARK_FullSort)
agents.push_back(slaveId);
- sorter.add(slaveId, agentResources);
+ sorter.addSlave(slaveId, agentScalarQuantities);
}
}
watch.stop();
@@ -1986,7 +1907,7 @@ TYPED_TEST(CommonSorterTest, BENCHMARK_FullSort)
watch.start();
{
foreach (const SlaveID& slaveId, agents) {
- sorter.remove(slaveId, agentResources);
+ sorter.removeSlave(slaveId);
}
}
watch.stop();
@@ -2087,8 +2008,8 @@ TYPED_TEST(CommonSorterTest, BENCHMARK_HierarchyFullSort)
cout << "Added " << clientCount << " clients in "
<< watch.elapsed() << endl;
- Resources agentResources = Resources::parse(
- "cpus:24;mem:4096;disk:4096;ports:[31000-32000]").get();
+ const ResourceQuantities agentScalarQuantities =
+ *ResourceQuantities::fromString("cpus:24;mem:4096;disk:4096");
watch.start();
{
@@ -2098,7 +2019,7 @@ TYPED_TEST(CommonSorterTest, BENCHMARK_HierarchyFullSort)
agents.push_back(slaveId);
- sorter.add(slaveId, agentResources);
+ sorter.addSlave(slaveId, agentScalarQuantities);
}
}
watch.stop();
@@ -2166,7 +2087,7 @@ TYPED_TEST(CommonSorterTest, BENCHMARK_HierarchyFullSort)
watch.start();
{
foreach (const SlaveID& slaveId, agents) {
- sorter.remove(slaveId, agentResources);
+ sorter.removeSlave(slaveId);
}
}
watch.stop();