This is an automated email from the ASF dual-hosted git repository.

mzhu pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/mesos.git
commit fdaabac781d62f7c969064009e6dc8e109617b74
Author: Meng Zhu <[email protected]>
AuthorDate: Tue Sep 3 13:44:39 2019 -0700

    Removed `Sorter::allocation(const SlaveID& slaveId)`.

    This paves the way for removing the allocation info tracking in the
    sorter (in favor of tracking them in the allocator).

    Review: https://reviews.apache.org/r/71428
---
 src/master/allocator/mesos/hierarchical.cpp        | 121 ++++++++++-----------
 src/master/allocator/mesos/sorter/drf/sorter.cpp   |  26 -----
 src/master/allocator/mesos/sorter/drf/sorter.hpp   |   3 -
 .../allocator/mesos/sorter/random/sorter.cpp       |  27 -----
 .../allocator/mesos/sorter/random/sorter.hpp       |   3 -
 src/master/allocator/mesos/sorter/sorter.hpp       |   4 -
 src/tests/sorter_tests.cpp                         |  13 ---
 7 files changed, 55 insertions(+), 142 deletions(-)

diff --git a/src/master/allocator/mesos/hierarchical.cpp b/src/master/allocator/mesos/hierarchical.cpp
index a477e50..8432a62 100644
--- a/src/master/allocator/mesos/hierarchical.cpp
+++ b/src/master/allocator/mesos/hierarchical.cpp
@@ -2348,74 +2348,63 @@ void HierarchicalAllocatorProcess::generateInverseOffers()
   // want the master to create `InverseOffer`s from.
   hashmap<FrameworkID, hashmap<SlaveID, UnavailableResources>> offerable;

-  // For maintenance, we use the framework sorters to determine which frameworks
-  // have (1) reserved and / or (2) unreserved resource on the specified
-  // slaveIds. This way we only send inverse offers to frameworks that have the
-  // potential to lose something. We keep track of which frameworks already have
-  // an outstanding inverse offer for the given slave in the
-  // UnavailabilityStatus of the specific slave using the `offerOutstanding`
-  // flag. This is equivalent to the accounting we do for resources when we send
-  // regular offers. If we didn't keep track of outstanding offers then we would
-  // keep generating new inverse offers even though the framework had not
-  // responded yet.
-
-  foreachvalue (const Owned<Sorter>& frameworkSorter, frameworkSorters) {
-    foreach (const SlaveID& slaveId, allocationCandidates) {
-      Slave& slave = *CHECK_NOTNONE(getSlave(slaveId));
-
-      if (slave.maintenance.isSome()) {
-        // We use a reference by alias because we intend to modify the
-        // `maintenance` and to improve readability.
-        Slave::Maintenance& maintenance = slave.maintenance.get();
-
-        hashmap<string, Resources> allocation =
-          frameworkSorter->allocation(slaveId);
-
-        foreachkey (const string& frameworkId_, allocation) {
-          FrameworkID frameworkId;
-          frameworkId.set_value(frameworkId_);
-
-          const Framework& framework =
-            *CHECK_NOTNONE(getFramework(frameworkId));
-
-          // No need to deallocate for an inactive framework as the master
-          // will not send it inverse offers.
-          if (!framework.active) {
-            continue;
-          }
+  // For maintenance, we only send inverse offers to frameworks that have the
+  // potential to lose something (i.e. it has resources offered or allocated on
+  // a given agent). We keep track of which frameworks already have an
+  // outstanding inverse offer for the given slave in the UnavailabilityStatus
+  // of the specific slave using the `offerOutstanding` flag. This is equivalent
+  // to the accounting we do for resources when we send regular offers. If we
+  // didn't keep track of outstanding offers then we would keep generating new
+  // inverse offers even though the framework had not responded yet.
+  //
+  // TODO(mzhu): Need to consider reservations as well.
+  foreach (const SlaveID& slaveId, allocationCandidates) {
+    Slave& slave = *CHECK_NOTNONE(getSlave(slaveId));
+
+    if (slave.maintenance.isSome()) {
+      // We use a reference by alias because we intend to modify the
+      // `maintenance` and to improve readability.
+      Slave::Maintenance& maintenance = slave.maintenance.get();
+
+      foreachkey (
+          const FrameworkID& frameworkId, slave.getOfferedOrAllocated()) {
+        const Framework& framework = *CHECK_NOTNONE(getFramework(frameworkId));
+
+        // No need to deallocate for an inactive framework as the master
+        // will not send it inverse offers.
+        if (!framework.active) {
+          continue;
+        }

-          // If this framework doesn't already have inverse offers for the
-          // specified slave.
-          if (!offerable[frameworkId].contains(slaveId)) {
-            // If there isn't already an outstanding inverse offer to this
-            // framework for the specified slave.
-            if (!maintenance.offersOutstanding.contains(frameworkId)) {
-              // Ignore in case the framework filters inverse offers for this
-              // slave.
-              //
-              // NOTE: Since this specific allocator implementation only sends
-              // inverse offers for maintenance primitives, and those are at the
-              // whole slave level, we only need to filter based on the
-              // time-out.
-              if (isFiltered(framework, slave)) {
-                continue;
-              }
-
-              const UnavailableResources unavailableResources =
-                UnavailableResources{
-                    Resources(),
-                    maintenance.unavailability};
-
-              // For now we send inverse offers with empty resources when the
-              // inverse offer represents maintenance on the machine. In the
-              // future we could be more specific about the resources on the
-              // host, as we have the information available.
-              offerable[frameworkId][slaveId] = unavailableResources;
-
-              // Mark this framework as having an offer outstanding for the
-              // specified slave.
-              maintenance.offersOutstanding.insert(frameworkId);
+        // If this framework doesn't already have inverse offers for the
+        // specified slave.
+        if (!offerable[frameworkId].contains(slaveId)) {
+          // If there isn't already an outstanding inverse offer to this
+          // framework for the specified slave.
+          if (!maintenance.offersOutstanding.contains(frameworkId)) {
+            // Ignore in case the framework filters inverse offers for this
+            // slave.
+            //
+            // NOTE: Since this specific allocator implementation only sends
+            // inverse offers for maintenance primitives, and those are at the
+            // whole slave level, we only need to filter based on the
+            // time-out.
+            if (isFiltered(framework, slave)) {
+              continue;
             }
+
+            const UnavailableResources unavailableResources =
+              UnavailableResources{Resources(), maintenance.unavailability};
+
+            // For now we send inverse offers with empty resources when the
+            // inverse offer represents maintenance on the machine. In the
+            // future we could be more specific about the resources on the
+            // host, as we have the information available.
+            offerable[frameworkId][slaveId] = unavailableResources;
+
+            // Mark this framework as having an offer outstanding for the
+            // specified slave.
+            maintenance.offersOutstanding.insert(frameworkId);
           }
         }
       }
diff --git a/src/master/allocator/mesos/sorter/drf/sorter.cpp b/src/master/allocator/mesos/sorter/drf/sorter.cpp
index 09889cd..ef79083 100644
--- a/src/master/allocator/mesos/sorter/drf/sorter.cpp
+++ b/src/master/allocator/mesos/sorter/drf/sorter.cpp
@@ -431,32 +431,6 @@ const ResourceQuantities& DRFSorter::allocationScalarQuantities() const
 }


-hashmap<string, Resources> DRFSorter::allocation(const SlaveID& slaveId) const
-{
-  hashmap<string, Resources> result;
-
-  // We want to find the allocation that has been made to each client
-  // on a particular `slaveId`. Rather than traversing the tree
-  // looking for leaf nodes (clients), we can instead just iterate
-  // over the `clients` hashmap.
-  //
-  // TODO(jmlvanre): We can index the allocation by slaveId to make
-  // this faster. It is a tradeoff between speed vs. memory. For now
-  // we use existing data structures.
-  foreachvalue (const Node* client, clients) {
-    if (client->allocation.resources.contains(slaveId)) {
-      // It is safe to use `at()` here because we've just checked the
-      // existence of the key. This avoids unnecessary copies.
-      string path = client->clientPath();
-      CHECK(!result.contains(path));
-      result.emplace(path, client->allocation.resources.at(slaveId));
-    }
-  }
-
-  return result;
-}
-
-
 Resources DRFSorter::allocation(
     const string& clientPath,
     const SlaveID& slaveId) const
diff --git a/src/master/allocator/mesos/sorter/drf/sorter.hpp b/src/master/allocator/mesos/sorter/drf/sorter.hpp
index f157ec6..2a861e2 100644
--- a/src/master/allocator/mesos/sorter/drf/sorter.hpp
+++ b/src/master/allocator/mesos/sorter/drf/sorter.hpp
@@ -90,9 +90,6 @@ public:

   const ResourceQuantities& allocationScalarQuantities() const override;

-  hashmap<std::string, Resources> allocation(
-      const SlaveID& slaveId) const override;
-
   Resources allocation(
       const std::string& clientPath,
       const SlaveID& slaveId) const override;
diff --git a/src/master/allocator/mesos/sorter/random/sorter.cpp b/src/master/allocator/mesos/sorter/random/sorter.cpp
index 60a5797..86aeb1b 100644
--- a/src/master/allocator/mesos/sorter/random/sorter.cpp
+++ b/src/master/allocator/mesos/sorter/random/sorter.cpp
@@ -367,33 +367,6 @@ const ResourceQuantities& RandomSorter::allocationScalarQuantities() const
 }


-hashmap<string, Resources> RandomSorter::allocation(
-    const SlaveID& slaveId) const
-{
-  hashmap<string, Resources> result;
-
-  // We want to find the allocation that has been made to each client
-  // on a particular `slaveId`. Rather than traversing the tree
-  // looking for leaf nodes (clients), we can instead just iterate
-  // over the `clients` hashmap.
-  //
-  // TODO(jmlvanre): We can index the allocation by slaveId to make
-  // this faster. It is a tradeoff between speed vs. memory. For now
-  // we use existing data structures.
-  foreachvalue (const Node* client, clients) {
-    if (client->allocation.resources.contains(slaveId)) {
-      // It is safe to use `at()` here because we've just checked the
-      // existence of the key. This avoids unnecessary copies.
-      string path = client->clientPath();
-      CHECK(!result.contains(path));
-      result.emplace(path, client->allocation.resources.at(slaveId));
-    }
-  }
-
-  return result;
-}
-
-
 Resources RandomSorter::allocation(
     const string& clientPath,
     const SlaveID& slaveId) const
diff --git a/src/master/allocator/mesos/sorter/random/sorter.hpp b/src/master/allocator/mesos/sorter/random/sorter.hpp
index 8663ccd..bc55809 100644
--- a/src/master/allocator/mesos/sorter/random/sorter.hpp
+++ b/src/master/allocator/mesos/sorter/random/sorter.hpp
@@ -90,9 +90,6 @@ public:

   const ResourceQuantities& allocationScalarQuantities() const override;

-  hashmap<std::string, Resources> allocation(
-      const SlaveID& slaveId) const override;
-
   Resources allocation(
       const std::string& clientPath,
       const SlaveID& slaveId) const override;
diff --git a/src/master/allocator/mesos/sorter/sorter.hpp b/src/master/allocator/mesos/sorter/sorter.hpp
index 52b8a7b..6b6b4a1 100644
--- a/src/master/allocator/mesos/sorter/sorter.hpp
+++ b/src/master/allocator/mesos/sorter/sorter.hpp
@@ -114,10 +114,6 @@ public:
       const std::string& client) const = 0;

   virtual const ResourceQuantities& allocationScalarQuantities() const = 0;

-  // Returns the clients that have allocations on this slave.
-  virtual hashmap<std::string, Resources> allocation(
-      const SlaveID& slaveId) const = 0;
-
   // Returns the given slave's resources that have been allocated to
   // this client.
   virtual Resources allocation(
diff --git a/src/tests/sorter_tests.cpp b/src/tests/sorter_tests.cpp
index 97ab910..64627c4 100644
--- a/src/tests/sorter_tests.cpp
+++ b/src/tests/sorter_tests.cpp
@@ -737,14 +737,6 @@ TEST(DRFSorterTest, HierarchicalAllocation)
   EXPECT_EQ(vector<string>({"a", "b/d", "b/c"}), sorter.sort());

   {
-    hashmap<string, Resources> agentAllocation =
-      sorter.allocation(slaveId);
-
-    EXPECT_EQ(3u, agentAllocation.size());
-    EXPECT_EQ(aResources, agentAllocation.at("a"));
-    EXPECT_EQ(cResources, agentAllocation.at("b/c"));
-    EXPECT_EQ(dResources, agentAllocation.at("b/d"));
-
     EXPECT_EQ(aResources, sorter.allocation("a", slaveId));
     EXPECT_EQ(cResources, sorter.allocation("b/c", slaveId));
     EXPECT_EQ(dResources, sorter.allocation("b/d", slaveId));
@@ -1298,11 +1290,6 @@ TYPED_TEST(CommonSorterTest, AllocationForInactiveClient)
   sorter.allocated("a", slaveId, Resources::parse("cpus:2;mem:2").get());
   sorter.allocated("b", slaveId, Resources::parse("cpus:3;mem:3").get());

-  hashmap<string, Resources> clientAllocation = sorter.allocation(slaveId);
-  EXPECT_EQ(2u, clientAllocation.size());
-  EXPECT_EQ(Resources::parse("cpus:2;mem:2").get(), clientAllocation.at("a"));
-  EXPECT_EQ(Resources::parse("cpus:3;mem:3").get(), clientAllocation.at("b"));
-
   hashmap<SlaveID, Resources> agentAllocation1 = sorter.allocation("a");
   EXPECT_EQ(1u, agentAllocation1.size());
   EXPECT_EQ(
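Context for the change (not part of the patch): the removed Sorter::allocation(const SlaveID&) built the per-agent view by scanning every client in the sorter, whereas the allocator can now consult its own per-agent tracking (see slave.getOfferedOrAllocated() in the hunk above). The following standalone sketch models that trade-off with plain STL containers; the names (ClientAllocations, allocationOnAgent, OfferedOrAllocated) and the toy integer CPU counts are illustrative assumptions, not the actual Mesos types.

// Standalone model of the data-structure change; names and types here are
// illustrative only, not the actual Mesos classes.
#include <iostream>
#include <map>
#include <set>
#include <string>

// Per-client allocations keyed by agent, the shape the sorter tracks:
// client path -> (agent id -> cpus).
using ClientAllocations =
    std::map<std::string, std::map<std::string, int>>;

// Removed-style lookup: build the per-agent view by scanning every client.
std::map<std::string, int> allocationOnAgent(
    const ClientAllocations& clients,
    const std::string& agentId)
{
  std::map<std::string, int> result;
  for (const auto& [client, byAgent] : clients) {
    auto it = byAgent.find(agentId);
    if (it != byAgent.end()) {
      result.emplace(client, it->second);
    }
  }
  return result;
}

// New-style tracking: keep a per-agent index of frameworks that have
// something offered or allocated, so no full client scan is needed.
using OfferedOrAllocated = std::map<std::string, std::set<std::string>>;

int main()
{
  ClientAllocations clients = {
      {"a", {{"agent1", 2}}},
      {"b/c", {{"agent1", 3}, {"agent2", 1}}}};

  // Old approach: aggregate on demand across all clients.
  for (const auto& [client, cpus] : allocationOnAgent(clients, "agent1")) {
    std::cout << client << " holds " << cpus << " cpus on agent1\n";
  }

  // New approach: the per-agent index answers the same question directly.
  OfferedOrAllocated offeredOrAllocated = {
      {"agent1", {"a", "b/c"}},
      {"agent2", {"b/c"}}};

  for (const std::string& framework : offeredOrAllocated["agent1"]) {
    std::cout << framework << " is offered/allocated on agent1\n";
  }

  return 0;
}

Either structure answers the same question the inverse-offer path asks ("which frameworks could lose something on this agent?"); the per-agent index simply avoids the scan that the removed sorter method performed.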
