This is an automated email from the ASF dual-hosted git repository.
ccondit pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/yunikorn-core.git
The following commit(s) were added to refs/heads/master by this push:
new 9f17e191 [YUNIKORN-2678] Improve FAIR queue sort algorithm (#935)
9f17e191 is described below
commit 9f17e1917603d9a810612d30bb70c53dcb080c79
Author: Paul Santa Clara <[email protected]>
AuthorDate: Tue Aug 27 10:46:17 2024 -0500
[YUNIKORN-2678] Improve FAIR queue sort algorithm (#935)
Update the queue sort algorithm to compute a fair usage ratio using
the resource with the highest usage percentage of guaranteed, max, or
available resources (in that order). Queues will be ordered with the lowest
fair usage ratio first.
Closes: #935
Signed-off-by: Craig Condit <[email protected]>
---
pkg/common/resources/resources.go | 84 ++++++++--
pkg/common/resources/resources_test.go | 283 ++++++++++++++++++++++++++-------
pkg/scheduler/objects/queue.go | 36 ++++-
pkg/scheduler/objects/queue_test.go | 153 ++++++++++++++++++
pkg/scheduler/objects/sorters.go | 21 +--
pkg/scheduler/objects/sorters_test.go | 57 ++++---
6 files changed, 526 insertions(+), 108 deletions(-)
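For readers skimming the change, the sketch below is illustrative only and is not part of the commit; the names demoQueue and queueShare are invented for this example. It shows the general idea described above: a queue's fair usage ratio is the largest allocated/denominator ratio across its resource types, where the denominator is the guarantee when one is set and otherwise the queue's fair max capacity, and queues sort with the lowest ratio first. The special handling of explicit zero or negative guarantees in getShareFairForDenominator is omitted for brevity.

    package main

    import (
        "fmt"
        "sort"
    )

    // demoQueue is a stand-in for a scheduler queue; not the real Queue type.
    type demoQueue struct {
        name       string
        allocated  map[string]int64 // current usage per resource type
        guaranteed map[string]int64 // configured guarantees (may be empty)
        fairMax    map[string]int64 // fair max capacity derived from the hierarchy
    }

    // queueShare mirrors the idea of getFairShare: for every resource type with
    // usage, divide by the guarantee when present, otherwise by the fair max,
    // and keep the largest ratio seen.
    func queueShare(q demoQueue) float64 {
        share := 0.0
        for res, used := range q.allocated {
            if used <= 0 {
                continue // no usage, no share for this resource type
            }
            denom := q.guaranteed[res]
            if denom <= 0 {
                denom = q.fairMax[res] // fall back to the fair max capacity
            }
            if denom <= 0 {
                continue // nothing usable as a denominator
            }
            if ratio := float64(used) / float64(denom); ratio > share {
                share = ratio
            }
        }
        return share
    }

    func main() {
        queues := []demoQueue{
            {name: "q1", allocated: map[string]int64{"vcore": 300}, guaranteed: map[string]int64{"vcore": 400}, fairMax: map[string]int64{"vcore": 1000}},
            {name: "q2", allocated: map[string]int64{"vcore": 300}, guaranteed: map[string]int64{}, fairMax: map[string]int64{"vcore": 1000}},
        }
        // lowest fair usage ratio first, matching the new FAIR queue sort order
        sort.SliceStable(queues, func(i, j int) bool {
            return queueShare(queues[i]) < queueShare(queues[j])
        })
        for _, q := range queues {
            fmt.Printf("%s share=%.2f\n", q.name, queueShare(q))
        }
    }

Run as a standalone program, q2 (0.30, falling back to fair max) sorts ahead of q1 (0.75, against its guarantee).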
diff --git a/pkg/common/resources/resources.go b/pkg/common/resources/resources.go
index 0f6083ff..685d9f27 100644
--- a/pkg/common/resources/resources.go
+++ b/pkg/common/resources/resources.go
@@ -482,6 +482,62 @@ func (r *Resource) fitIn(smaller *Resource, skipUndef bool) bool {
return true
}
+// getShareFairForDenominator attempts to compute the denominator for a queue's fair share ratio.
+// Here the Resources can be either guaranteed Resources or fairmax Resources.
+// If the quantity is explicitly 0 or negative, we will check usage. If usage > 0, the share will be set to 1.0. Otherwise, it will be set to 0.0.
+func getShareFairForDenominator(resourceType string, allocated Quantity, denominatorResources *Resource) (float64, bool) {
+ if denominatorResources == nil {
+ return 0.0, false
+ }
+
+ denominator, ok := denominatorResources.Resources[resourceType]
+
+ switch {
+ case ok && denominator <= 0:
+ if allocated <= 0 {
+ // explicit 0 or negative value with NO usage
+ return 0.0, true
+ } else {
+ // explicit 0 or negative value with usage
+ return 1.0, true
+ }
+ case denominator > 0:
+ return (float64(allocated) / float64(denominator)), true
+ default:
+		// no denominator, i.e. no guarantee or fairmax for resourceType
+ return 0.0, false
+ }
+}
+
+// getFairShare produces a ratio which represents a queue's current 'fair' share usage.
+// Iterate over all of the allocated resource types. For each, compute the ratio, ultimately returning the max ratio encountered.
+// The numerator will be the allocated usage.
+// If guarantees are present, they will be used for the denominator, otherwise we will fall back to the 'fairmax' capacity of the cluster.
+func getFairShare(allocated, guaranteed, fair *Resource) float64 {
+ if allocated == nil || len(allocated.Resources) == 0 {
+ return 0.0
+ }
+
+ var maxShare float64
+ for k, v := range allocated.Resources {
+ var nextShare float64
+
+ // if usage <= 0, resource has no share
+ if allocated.Resources[k] < 0 {
+ continue
+ }
+
+ nextShare, found := getShareFairForDenominator(k, v, guaranteed)
+ if !found {
+			nextShare, found = getShareFairForDenominator(k, v, fair)
+ }
+ if found && nextShare > maxShare {
+ maxShare = nextShare
+ }
+ }
+ return maxShare
+}
+
// Get the share of each resource quantity when compared to the total
// resources quantity
// NOTE: shares can be negative and positive in the current assumptions
@@ -546,24 +602,18 @@ func CompUsageRatio(left, right, total *Resource) int {
// 0 for equal shares
// 1 if the left share is larger
// -1 if the right share is larger
-func CompUsageRatioSeparately(left, leftTotal, right, rightTotal *Resource) int {
- lshares := getShares(left, leftTotal)
- rshares := getShares(right, rightTotal)
+func CompUsageRatioSeparately(leftAllocated, leftGuaranteed, leftFairMax, rightAllocated, rightGuaranteed, rightFairMax *Resource) int {
+ lshare := getFairShare(leftAllocated, leftGuaranteed, leftFairMax)
+ rshare := getFairShare(rightAllocated, rightGuaranteed, rightFairMax)
- return compareShares(lshares, rshares)
-}
-
-// Compare two resources usage shares and assumes a nil total resource.
-// The share is thus equivalent to the usage passed in.
-// This returns the same value as compareShares does:
-// 0 for equal shares
-// 1 if the left share is larger
-// -1 if the right share is larger
-func CompUsageShares(left, right *Resource) int {
- lshares := getShares(left, nil)
- rshares := getShares(right, nil)
-
- return compareShares(lshares, rshares)
+ switch {
+ case lshare > rshare:
+ return 1
+ case lshare < rshare:
+ return -1
+ default:
+ return 0
+ }
}
// Get fairness ratio calculated by:
diff --git a/pkg/common/resources/resources_test.go b/pkg/common/resources/resources_test.go
index 9273fd01..d9766ea7 100644
--- a/pkg/common/resources/resources_test.go
+++ b/pkg/common/resources/resources_test.go
@@ -1314,6 +1314,168 @@ func TestFitInSkip(t *testing.T) {
}
}
+//nolint:funlen // thorough test
+func TestGetFairShare(t *testing.T) {
+	// 0 guarantee should be treated as absence of a guarantee
+ // test to protect against division by 0. full=0. rare but possible.
+ tests := []struct {
+ allocated *Resource
+ guaranteed *Resource
+ fairmax *Resource
+ expected float64
+ }{
+ // guarantees exist for each resource type so full does not
come into play
+ {
+ allocated: &Resource{Resources:
map[string]Quantity{"vcores": 0}},
+ guaranteed: &Resource{Resources:
map[string]Quantity{"vcores": 1000}},
+ fairmax: &Resource{Resources:
map[string]Quantity{"vcores": 99999}},
+ expected: float64(0.0) / float64(1000.0),
+ },
+ {
+ allocated: &Resource{Resources:
map[string]Quantity{"vcores": 100, "memory": 2500}},
+ guaranteed: &Resource{Resources:
map[string]Quantity{"vcores": 1000, "memory": 5000}},
+ fairmax: &Resource{Resources:
map[string]Quantity{"vcores": 99999, "memory": 99999}},
+ expected: float64(2500) / float64(5000),
+ },
+ {
+ allocated: &Resource{Resources:
map[string]Quantity{"vcores": 100, "memory": 2500, "ephemeral-storage": 100}},
+ guaranteed: &Resource{Resources:
map[string]Quantity{"vcores": 1000, "memory": 5000, "ephemeral-storage": 2000}},
+ fairmax: &Resource{Resources:
map[string]Quantity{"vcores": 99999, "memory": 99999, "ephemeral-storage":
99999}},
+ expected: float64(2500) / float64(5000),
+ },
+
+ // in the absence of guarantees the share denominator will be
determined by the full value
+		{ // ephemeral-storage = 1000 / 2000 = 0.5; fairmax ephemeral-storage dominates
+ allocated: &Resource{Resources:
map[string]Quantity{"ephemeral-storage": 1000, "memory": 1000, "pods": 1000,
"vcore": 1000}},
+ guaranteed: &Resource{Resources: map[string]Quantity{}},
+ fairmax: &Resource{Resources:
map[string]Quantity{"ephemeral-storage": 2000, "memory": 99999, "pods": 99999,
"vcore": 99999}},
+ expected: float64(1000) / float64(2000),
+ },
+
+		// there is a guarantee on pods but fairmax ephemeral-storage still dominates
+		{ // ephemeral-storage = 1000 / 2000 = 0.5; fairmax ephemeral-storage dominates
+ allocated: &Resource{Resources:
map[string]Quantity{"ephemeral-storage": 1000, "memory": 1000, "pods": 1000,
"vcore": 1000}},
+ guaranteed: &Resource{Resources:
map[string]Quantity{"pods": 88888}},
+ fairmax: &Resource{Resources:
map[string]Quantity{"ephemeral-storage": 2000, "memory": 99999, "pods": 99999,
"vcore": 99999}},
+ expected: float64(1000) / float64(2000),
+ },
+
+		// guarantee on vcore, but it still dominates all fairmax resources
+		{ // vcore = 1000 / 2000; guaranteed vcore dominates
+ allocated: &Resource{Resources:
map[string]Quantity{"ephemeral-storage": 1000, "memory": 1000, "pods": 1000,
"vcore": 1000}},
+ guaranteed: &Resource{Resources:
map[string]Quantity{"vcore": 2000}},
+ fairmax: &Resource{Resources:
map[string]Quantity{"ephemeral-storage": 99999, "memory": 99999, "pods": 99999,
"vcore": 99999}},
+ expected: float64(1000) / float64(2000),
+ },
+
+ // 0 allocated
+ { // ephemeral-storage = 0 / 2000;
+ allocated: &Resource{Resources:
map[string]Quantity{"ephemeral-storage": 0, "memory": 0, "pods": 0, "vcore":
0}},
+ guaranteed: &Resource{Resources:
map[string]Quantity{"vcores": 2000}},
+ fairmax: &Resource{Resources:
map[string]Quantity{"ephemeral-storage": 99999, "memory": 99999, "pods": 99999,
"vcore": 99999}},
+ expected: float64(0) / float64(2000),
+ },
+
+		// explicit 0 guarantee with usage
+		{ // vcore has usage, therefore its share is 1.0 (100%)
+ allocated: &Resource{Resources:
map[string]Quantity{"ephemeral-storage": 1000, "memory": 1000, "pods": 1000,
"vcore": 10}},
+ guaranteed: &Resource{Resources:
map[string]Quantity{"vcore": 0}},
+ fairmax: &Resource{Resources:
map[string]Quantity{"ephemeral-storage": 5000, "memory": 5000, "pods": 5000,
"vcore": 5000}},
+ expected: float64(1.0),
+ },
+
+		// explicit 0 guarantee with NO usage
+		{ // vcore has NO usage, so its share is 0. memory = 1000 / 5000 = 0.20
+ allocated: &Resource{Resources:
map[string]Quantity{"ephemeral-storage": 1000, "memory": 1000, "pods": 1000,
"vcore": 0}},
+ guaranteed: &Resource{Resources:
map[string]Quantity{"vcore": 0}},
+ fairmax: &Resource{Resources:
map[string]Quantity{"ephemeral-storage": 5000, "memory": 5000, "pods": 5000,
"vcore": 5000}},
+ expected: float64(1000) / float64(5000),
+ },
+
+ // explicit 0 fairmax with usage
+		{ // memory has an explicit fairmax of 0 and usage. Its share of 1.0 will dominate.
+ allocated: &Resource{Resources:
map[string]Quantity{"ephemeral-storage": 1000, "memory": 1000, "pods": 1000,
"vcore": 1000}},
+ guaranteed: &Resource{Resources: map[string]Quantity{}},
+ fairmax: &Resource{Resources:
map[string]Quantity{"ephemeral-storage": 5000, "memory": 0, "pods": 5000,
"vcore": 5000}},
+ expected: float64(1.0),
+ },
+
+ // explicit 0 fairmax with NO usage
+		{ // memory has an explicit fairmax of 0 but no usage, so its share will be 0. vcore dominates with 1000 / 4000 = 0.25
+ allocated: &Resource{Resources:
map[string]Quantity{"ephemeral-storage": 1000, "memory": 0, "pods": 1000,
"vcore": 1000}},
+ guaranteed: &Resource{Resources: map[string]Quantity{}},
+ fairmax: &Resource{Resources:
map[string]Quantity{"ephemeral-storage": 5000, "memory": 0, "pods": 5000,
"vcore": 4000}},
+ expected: float64(1000) / float64(4000),
+ },
+
+		// negative guarantee with usage
+ { // negative guarantee with usage on vcore = 1.0
+ allocated: &Resource{Resources:
map[string]Quantity{"ephemeral-storage": 1000, "memory": 1000, "pods": 1000,
"vcore": 1000}},
+ guaranteed: &Resource{Resources:
map[string]Quantity{"vcore": -10}},
+ fairmax: &Resource{Resources:
map[string]Quantity{"ephemeral-storage": 5000, "memory": 5000, "pods": 5000,
"vcore": 5000}},
+ expected: float64(1.0),
+ },
+
+		// negative guarantee with NO usage
+		{ // negative guarantee with NO usage on vcore = 0.0. memory dominates with 1000 / 2000 = 0.5
+ allocated: &Resource{Resources:
map[string]Quantity{"ephemeral-storage": 1000, "memory": 1000, "pods": 1000,
"vcore": 0}},
+ guaranteed: &Resource{Resources:
map[string]Quantity{"vcore": -10}},
+ fairmax: &Resource{Resources:
map[string]Quantity{"ephemeral-storage": 5000, "memory": 2000, "pods": 5000,
"vcore": 5000}},
+ expected: float64(1000) / float64(2000),
+ },
+
+ // negative fairmax with usage
+		{ // vcore has usage against a negative fairmax, so its share is 1.0 and dominates
+ allocated: &Resource{Resources:
map[string]Quantity{"ephemeral-storage": 1000, "memory": 1000, "pods": 1000,
"vcore": 1000}},
+ guaranteed: &Resource{Resources: map[string]Quantity{}},
+ fairmax: &Resource{Resources:
map[string]Quantity{"ephemeral-storage": 5000, "memory": 5000, "pods": 5000,
"vcore": -1000}},
+ expected: float64(1.0),
+ },
+
+ // negative fairmax with NO usage
+		{ // vcore has NO usage against a negative fairmax, so its share is 0. memory dominates with 1000 / 5000 = 0.20
+ allocated: &Resource{Resources:
map[string]Quantity{"ephemeral-storage": 1000, "memory": 1000, "pods": 1000,
"vcore": 0}},
+ guaranteed: &Resource{Resources: map[string]Quantity{}},
+ fairmax: &Resource{Resources:
map[string]Quantity{"ephemeral-storage": 5000, "memory": 5000, "pods": 5000,
"vcore": -1000}},
+ expected: float64(1000) / float64(5000),
+ },
+
+ // negative usage gets no share
+		{ // vcore has negative usage and is skipped. memory dominates with 1000 / 5000 = 0.20
+ allocated: &Resource{Resources:
map[string]Quantity{"ephemeral-storage": 1000, "memory": 1000, "pods": 1000,
"vcore": -1000}},
+ guaranteed: &Resource{Resources: map[string]Quantity{}},
+ fairmax: &Resource{Resources:
map[string]Quantity{"ephemeral-storage": 5000, "memory": 5000, "pods": 5000,
"vcore": 1000}},
+ expected: float64(1000) / float64(5000),
+ },
+
+ // nil guarantees are ignored.
+ {
+ allocated: &Resource{Resources:
map[string]Quantity{"ephemeral-storage": 1000, "memory": 1000, "pods": 1000,
"vcore": 1000}},
+ guaranteed: nil,
+ fairmax: &Resource{Resources:
map[string]Quantity{"ephemeral-storage": 1000, "memory": 1000, "pods": 1000,
"vcore": 1000}},
+ expected: float64(1.0),
+ },
+
+ // nil usage
+ {
+ allocated: nil,
+ guaranteed: &Resource{Resources: map[string]Quantity{}},
+ fairmax: &Resource{Resources:
map[string]Quantity{"ephemeral-storage": 1000, "memory": 1000, "pods": 1000,
"vcore": 1000}},
+ expected: float64(0.0),
+ },
+ }
+
+ for _, tc := range tests {
+ subtest := fmt.Sprintf("%s-%s-%s", tc.allocated, tc.guaranteed,
tc.fairmax)
+ t.Run(subtest, func(t *testing.T) {
+ share := getFairShare(tc.allocated, tc.guaranteed,
tc.fairmax)
+ if !reflect.DeepEqual(share, tc.expected) {
+ t.Errorf("incorrect share for allocated( %s ),
guaranteed( %s ), fairmax( %s ) expected %v got: %v", tc.allocated,
tc.guaranteed, tc.fairmax, tc.expected, share)
+ }
+ })
+ }
+}
+
func TestGetShares(t *testing.T) {
tests := []struct {
res *Resource
@@ -1700,82 +1862,83 @@ func TestFairnessRatio(t *testing.T) {
}
}
-// This tests just to cover code in the CompUsageRatio,
CompUsageRatioSeparately and CompUsageShare.
-// This does not check the share calculation and share comparison see
TestGetShares and TestCompShares for that.
-func TestCompUsage(t *testing.T) {
+// This test exists just to cover code in CompUsageRatioSeparately
+func TestCompUsageRatioSeparately(t *testing.T) {
tests := []struct {
- left *Resource
- right *Resource
- leftTotal *Resource
- rightTotal *Resource
- expectedShares int
- expectedRatio int
- message string
+ leftAllocated *Resource
+ rightAllocated *Resource
+ leftGuaranteed *Resource
+ rightGuaranteed *Resource
+ leftFairMax *Resource
+ rightFairMax *Resource
+ expectedRatio int
+ message string
}{
{
- left: NewResource(),
- right: NewResource(),
- leftTotal: NewResource(),
- rightTotal: NewResource(),
- expectedShares: 0,
- expectedRatio: 0,
- message: "empty resources",
- },
- {
- left: &Resource{Resources:
map[string]Quantity{"first": 50, "second": 50, "third": 50}},
- right: &Resource{Resources:
map[string]Quantity{"first": 10, "second": 10, "third": 10}},
- leftTotal: &Resource{Resources:
map[string]Quantity{"first": 100, "second": 100, "third": 100}},
- rightTotal: &Resource{Resources:
map[string]Quantity{"first": 100, "second": 100, "third": 100}},
- expectedShares: 1,
- expectedRatio: 1,
- message: "left larger than right",
+ leftAllocated: NewResource(),
+ rightAllocated: NewResource(),
+ leftGuaranteed: NewResource(),
+ rightGuaranteed: NewResource(),
+ leftFairMax: NewResource(),
+ rightFairMax: NewResource(),
+ expectedRatio: 0,
+ message: "empty resources",
+ },
+ {
+ leftAllocated: &Resource{Resources:
map[string]Quantity{"ephemeral-storage": 100, "memory": 100, "pods": 100,
"vcore": 100}},
+ rightAllocated: &Resource{Resources:
map[string]Quantity{"ephemeral-storage": 100, "memory": 100, "pods": 100,
"vcore": 100}},
+ leftGuaranteed: &Resource{Resources:
map[string]Quantity{"pods": 200}},
+ rightGuaranteed: &Resource{Resources:
map[string]Quantity{"pods": 800}},
+ leftFairMax: &Resource{Resources:
map[string]Quantity{"ephemeral-storage": 1000, "memory": 1000, "pods": 1000,
"vcore": 1000}},
+ rightFairMax: &Resource{Resources:
map[string]Quantity{"ephemeral-storage": 1000, "memory": 1000, "pods": 1000,
"vcore": 1000}},
+ expectedRatio: 1,
+			message:         "with guarantees, left has larger share",
},
{
- left: &Resource{Resources:
map[string]Quantity{"first": 10, "second": 10, "third": 10}},
- right: &Resource{Resources:
map[string]Quantity{"first": 50, "second": 50, "third": 50}},
- leftTotal: &Resource{Resources:
map[string]Quantity{"first": 100, "second": 100, "third": 100}},
- rightTotal: &Resource{Resources:
map[string]Quantity{"first": 10, "second": 10, "third": 10}},
- expectedShares: -1,
- expectedRatio: -1,
- message: "right larger than left",
+ leftAllocated: &Resource{Resources:
map[string]Quantity{"ephemeral-storage": 100, "memory": 100, "pods": 100,
"vcore": 100}},
+ rightAllocated: &Resource{Resources:
map[string]Quantity{"ephemeral-storage": 100, "memory": 100, "pods": 100,
"vcore": 100}},
+ leftGuaranteed: &Resource{Resources:
map[string]Quantity{"pods": 800}},
+ rightGuaranteed: &Resource{Resources:
map[string]Quantity{"pods": 200}},
+ leftFairMax: &Resource{Resources:
map[string]Quantity{"ephemeral-storage": 1000, "memory": 1000, "pods": 1000,
"vcore": 1000}},
+ rightFairMax: &Resource{Resources:
map[string]Quantity{"ephemeral-storage": 1000, "memory": 1000, "pods": 1000,
"vcore": 1000}},
+ expectedRatio: -1,
+			message:         "with guarantees, right has larger share",
},
{
- left: &Resource{Resources:
map[string]Quantity{"first": 50, "second": 50, "third": 50}},
- right: &Resource{Resources:
map[string]Quantity{"first": 10, "second": 10, "third": 10}},
- leftTotal: &Resource{Resources:
map[string]Quantity{"first": 100, "second": 100, "third": 100}},
- rightTotal: &Resource{Resources:
map[string]Quantity{"first": 100, "second": 100, "third": 100}},
- expectedShares: 1,
- expectedRatio: 1,
- message: "CompUsageRatioSeparately - left larger
than right",
+ leftAllocated: &Resource{Resources:
map[string]Quantity{"ephemeral-storage": 200, "memory": 200, "pods": 200,
"vcore": 200}},
+ rightAllocated: &Resource{Resources:
map[string]Quantity{"ephemeral-storage": 100, "memory": 100, "pods": 100,
"vcore": 100}},
+ leftGuaranteed: NewResource(),
+ rightGuaranteed: NewResource(),
+ leftFairMax: &Resource{Resources:
map[string]Quantity{"ephemeral-storage": 1000, "memory": 1000, "pods": 1000,
"vcore": 1000}},
+ rightFairMax: &Resource{Resources:
map[string]Quantity{"ephemeral-storage": 1000, "memory": 1000, "pods": 1000,
"vcore": 1000}},
+ expectedRatio: 1,
+			message:         "no guarantees, left has larger share",
},
{
- left: &Resource{Resources:
map[string]Quantity{"first": 10, "second": 10, "third": 10}},
- right: &Resource{Resources:
map[string]Quantity{"first": 50, "second": 50, "third": 50}},
- leftTotal: &Resource{Resources:
map[string]Quantity{"first": 100, "second": 100, "third": 100}},
- rightTotal: &Resource{Resources:
map[string]Quantity{"first": 10, "second": 10, "third": 10}},
- expectedShares: -1,
- expectedRatio: -1,
- message: "CompUsageRatioSeparately - right
larger than left",
+ leftAllocated: &Resource{Resources:
map[string]Quantity{"ephemeral-storage": 100, "memory": 100, "pods": 100,
"vcore": 100}},
+ rightAllocated: &Resource{Resources:
map[string]Quantity{"ephemeral-storage": 200, "memory": 200, "pods": 200,
"vcore": 200}},
+ leftGuaranteed: NewResource(),
+ rightGuaranteed: NewResource(),
+ leftFairMax: &Resource{Resources:
map[string]Quantity{"ephemeral-storage": 1000, "memory": 1000, "pods": 1000,
"vcore": 1000}},
+ rightFairMax: &Resource{Resources:
map[string]Quantity{"ephemeral-storage": 1000, "memory": 1000, "pods": 1000,
"vcore": 1000}},
+ expectedRatio: -1,
+			message:         "no guarantees, right has larger share",
},
{
- left: &Resource{Resources:
map[string]Quantity{"first": 50, "second": 50, "third": 50}},
- right: &Resource{Resources:
map[string]Quantity{"first": 50, "second": 50, "third": 50}},
- leftTotal: &Resource{Resources:
map[string]Quantity{"first": 100, "second": 100, "third": 100}},
- rightTotal: &Resource{Resources:
map[string]Quantity{"first": 100, "second": 100, "third": 100}},
- expectedShares: 0,
- expectedRatio: 0,
- message: "CompUsageRatioSeparately - equal
values",
+ leftAllocated: &Resource{Resources:
map[string]Quantity{"ephemeral-storage": 100, "memory": 100, "pods": 100,
"vcore": 100}},
+ rightAllocated: &Resource{Resources:
map[string]Quantity{"ephemeral-storage": 100, "memory": 100, "pods": 100,
"vcore": 100}},
+ leftGuaranteed: NewResource(),
+ rightGuaranteed: NewResource(),
+ leftFairMax: &Resource{Resources:
map[string]Quantity{"ephemeral-storage": 1000, "memory": 1000, "pods": 1000,
"vcore": 1000}},
+ rightFairMax: &Resource{Resources:
map[string]Quantity{"ephemeral-storage": 1000, "memory": 1000, "pods": 1000,
"vcore": 1000}},
+ expectedRatio: 0,
+ message: "no guarantees, tie",
},
}
for _, tc := range tests {
t.Run(tc.message, func(t *testing.T) {
- shares := CompUsageShares(tc.left, tc.right)
- if shares != tc.expectedShares {
- t.Errorf("%s: expected shares %d, got: %d",
tc.message, tc.expectedShares, shares)
- }
-
- ratio := CompUsageRatioSeparately(tc.left,
tc.leftTotal, tc.right, tc.rightTotal)
+ ratio := CompUsageRatioSeparately(tc.leftAllocated,
tc.leftGuaranteed, tc.leftFairMax, tc.rightAllocated, tc.rightGuaranteed,
tc.rightFairMax)
if ratio != tc.expectedRatio {
t.Errorf("%s: expected ratio %d, got: %d",
tc.message, tc.expectedRatio, ratio)
}
diff --git a/pkg/scheduler/objects/queue.go b/pkg/scheduler/objects/queue.go
index 2668183a..5296a36f 100644
--- a/pkg/scheduler/objects/queue.go
+++ b/pkg/scheduler/objects/queue.go
@@ -1177,6 +1177,7 @@ func (sq *Queue) sortQueues() []*Queue {
}
// Create a list of the queues with pending resources
sortedQueues := make([]*Queue, 0)
+ sortedMaxFairResources := make([]*resources.Resource, 0)
for _, child := range sq.GetCopyOfChildren() {
// a stopped queue cannot be scheduled
if child.IsStopped() {
@@ -1185,10 +1186,11 @@ func (sq *Queue) sortQueues() []*Queue {
// queue must have pending resources to be considered for
scheduling
if
resources.StrictlyGreaterThanZero(child.GetPendingResource()) {
sortedQueues = append(sortedQueues, child)
+			sortedMaxFairResources = append(sortedMaxFairResources, child.GetFairMaxResource())
}
}
// Sort the queues
- sortQueue(sortedQueues, sq.getSortType(), sq.IsPrioritySortEnabled())
+	sortQueue(sortedQueues, sortedMaxFairResources, sq.getSortType(), sq.IsPrioritySortEnabled())
return sortedQueues
}
@@ -1258,6 +1260,38 @@ func (sq *Queue) GetMaxResource() *resources.Resource {
return sq.internalGetMax(limit)
}
+// GetFairMaxResource computes the fair max resources for a given queue.
+// Starting with the root, descend down to the target queue, allowing children to override Resource values.
+// If the root includes an explicit 0 value for a Resource, do not include it in the accumulator and treat it as missing.
+// If no children provide a maximum capacity override, the resulting value will be the value found on the root.
+// It is useful for fair scheduling to allow a ratio to be produced representing the rough utilization % of a given queue.
+func (sq *Queue) GetFairMaxResource() *resources.Resource {
+ var limit *resources.Resource
+ if sq.parent == nil {
+ return sq.GetMaxResource().Clone()
+ }
+
+ limit = sq.parent.GetFairMaxResource()
+ return sq.internalGetFairMaxResource(limit)
+}
+
+func (sq *Queue) internalGetFairMaxResource(limit *resources.Resource) *resources.Resource {
+ sq.RLock()
+ defer sq.RUnlock()
+
+ out := limit.Clone()
+ if sq.maxResource.IsEmpty() || out.IsEmpty() {
+ return out
+ }
+
+	// perform merge. child wins every resource collision
+ for k, v := range sq.maxResource.Resources {
+ out.Resources[k] = v
+ }
+
+ return out
+}
+
// GetMaxQueueSet returns the max resource for the queue. The max resource
should never be larger than the
// max resource of the parent. The cluster size, which defines the root limit,
is not relevant for this call.
// Contrary to the GetMaxResource call. This will return nil unless a limit is
set.
diff --git a/pkg/scheduler/objects/queue_test.go b/pkg/scheduler/objects/queue_test.go
index c0792d50..173d0355 100644
--- a/pkg/scheduler/objects/queue_test.go
+++ b/pkg/scheduler/objects/queue_test.go
@@ -869,6 +869,159 @@ func TestMaxHeadroomMax(t *testing.T) {
assert.Assert(t, resources.Equals(res, headRoom), "leaf2 queue head
room not as expected %v, got: %v", res, headRoom)
}
+//nolint:funlen
+func TestGetFairMaxResource(t *testing.T) {
+ tests := []struct {
+ name string
+ RootResource map[string]string
+ ParentResource map[string]string
+ Tier0Resource map[string]string
+ Tier0Expectation map[string]string
+ Tier1Resource map[string]string
+ Tier1Expectation map[string]string
+ }{
+ {
+			name:             "children provide overrides for resource types",
+ RootResource: map[string]string{"vcore": "1000m"},
+ ParentResource: map[string]string{},
+ Tier0Resource: map[string]string{"vcore": "800m"},
+ Tier0Expectation: map[string]string{"vcore": "800m"},
+ Tier1Resource: map[string]string{"vcore": "1200m"},
+ Tier1Expectation: map[string]string{"vcore": "1200m"},
+ },
+ {
+			name:             "0s in the root are omitted. there is no ephemeral-storage available on this cluster",
+ RootResource:
map[string]string{"ephemeral-storage": "0", "memory": "1000", "pods": "1000",
"vcore": "1000m"},
+ ParentResource: map[string]string{},
+ Tier0Resource: map[string]string{},
+ Tier0Expectation: map[string]string{"memory": "1000",
"pods": "1000", "vcore": "1000m"},
+ Tier1Resource: map[string]string{"vcore": "900m"},
+ Tier1Expectation: map[string]string{"memory": "1000",
"pods": "1000", "vcore": "900m"},
+ },
+ {
+			name:             "children provide a maximum for resource types that do NOT exist on the root queue currently but may later because of autoscaling",
+ RootResource:
map[string]string{"ephemeral-storage": "1000", "memory": "1000", "pods":
"1000", "vcore": "1000m"},
+ ParentResource: map[string]string{},
+ Tier0Resource: map[string]string{"nvidia.com/gpu":
"100", "vcore": "800m"},
+ Tier0Expectation: map[string]string{"nvidia.com/gpu":
"100", "ephemeral-storage": "1000", "memory": "1000", "pods": "1000", "vcore":
"800m"},
+ Tier1Resource: map[string]string{},
+ Tier1Expectation:
map[string]string{"ephemeral-storage": "1000", "memory": "1000", "pods":
"1000", "vcore": "1000m"},
+ },
+ {
+			name:             "this is true even if they are on the root queue but are currently zero",
+ RootResource: map[string]string{"nvidia.com/gpu":
"0", "ephemeral-storage": "1000", "memory": "1000", "pods": "1000", "vcore":
"0m"},
+ ParentResource: map[string]string{},
+ Tier0Resource: map[string]string{"nvidia.com/gpu":
"100", "vcore": "800m"},
+ Tier0Expectation: map[string]string{"nvidia.com/gpu":
"100", "ephemeral-storage": "1000", "memory": "1000", "pods": "1000", "vcore":
"800m"},
+ Tier1Resource: map[string]string{},
+ Tier1Expectation:
map[string]string{"ephemeral-storage": "1000", "memory": "1000", "pods":
"1000"},
+ },
+ {
+ name: "multiple level restrictions",
+ RootResource:
map[string]string{"ephemeral-storage": "1000", "memory": "1000", "pods":
"1000", "vcore": "1000m"},
+ ParentResource: map[string]string{"vcore": "900m"},
+ Tier0Resource: map[string]string{"vcore": "800m"},
+ Tier0Expectation:
map[string]string{"ephemeral-storage": "1000", "memory": "1000", "pods":
"1000", "vcore": "800m"},
+ Tier1Resource: map[string]string{},
+ Tier1Expectation:
map[string]string{"ephemeral-storage": "1000", "memory": "1000", "pods":
"1000", "vcore": "900m"},
+ },
+ {
+			name:             "explicit 0s are honored for non-root queues",
+ RootResource:
map[string]string{"ephemeral-storage": "1000", "memory": "1000", "pods":
"1000", "vcore": "1000m"},
+ ParentResource: map[string]string{},
+ Tier0Resource:
map[string]string{"ephemeral-storage": "1000", "vcore": "0m"},
+ Tier0Expectation:
map[string]string{"ephemeral-storage": "1000", "memory": "1000", "pods":
"1000", "vcore": "0m"},
+ Tier1Resource: map[string]string{},
+ Tier1Expectation:
map[string]string{"ephemeral-storage": "1000", "memory": "1000", "pods":
"1000", "vcore": "1000m"},
+ },
+
+ {
+			name:             "nil root resources (no nodes in cluster)",
+ RootResource: nil,
+ ParentResource: map[string]string{},
+ Tier0Resource: map[string]string{},
+ Tier0Expectation: nil,
+ Tier1Resource: map[string]string{},
+ Tier1Expectation: nil,
+ },
+ {
+ name: "nil parent resources",
+ RootResource:
map[string]string{"ephemeral-storage": "1000", "memory": "1000", "pods":
"1000", "vcore": "1000m"},
+ ParentResource: nil,
+ Tier0Resource:
map[string]string{"ephemeral-storage": "1000", "vcore": "0m"},
+ Tier0Expectation:
map[string]string{"ephemeral-storage": "1000", "memory": "1000", "pods":
"1000", "vcore": "0m"},
+ Tier1Resource: map[string]string{},
+ Tier1Expectation:
map[string]string{"ephemeral-storage": "1000", "memory": "1000", "pods":
"1000", "vcore": "1000m"},
+ },
+ {
+ name: "nil leaf resources",
+ RootResource:
map[string]string{"ephemeral-storage": "1000", "memory": "1000", "pods":
"1000", "vcore": "1000m"},
+ ParentResource: map[string]string{},
+ Tier0Resource:
map[string]string{"ephemeral-storage": "1000", "vcore": "0m"},
+ Tier0Expectation:
map[string]string{"ephemeral-storage": "1000", "memory": "1000", "pods":
"1000", "vcore": "0m"},
+ Tier1Resource: nil,
+ Tier1Expectation:
map[string]string{"ephemeral-storage": "1000", "memory": "1000", "pods":
"1000", "vcore": "1000m"},
+ },
+ {
+			name:             "parent max with type different from child",
+ RootResource:
map[string]string{"ephemeral-storage": "1000", "memory": "1000", "pods":
"1000", "vcore": "1000m"},
+ ParentResource: map[string]string{"nvidia.com/gpu":
"100"},
+ Tier0Resource: map[string]string{"vcore": "800m"},
+ Tier0Expectation: map[string]string{"nvidia.com/gpu":
"100", "ephemeral-storage": "1000", "memory": "1000", "pods": "1000", "vcore":
"800m"},
+ Tier1Resource: map[string]string{},
+ Tier1Expectation: map[string]string{"nvidia.com/gpu":
"100", "ephemeral-storage": "1000", "memory": "1000", "pods": "1000", "vcore":
"1000m"},
+ },
+ {
+			name:             "parent explicit 0 limit for type not set in child",
+ RootResource:
map[string]string{"ephemeral-storage": "1000", "memory": "1000", "pods":
"1000", "vcore": "1000m"},
+ ParentResource: map[string]string{"nvidia.com/gpu":
"0"},
+ Tier0Resource: map[string]string{"vcore": "800m"},
+ Tier0Expectation: map[string]string{"nvidia.com/gpu":
"0", "ephemeral-storage": "1000", "memory": "1000", "pods": "1000", "vcore":
"800m"},
+ Tier1Resource: map[string]string{},
+ Tier1Expectation: map[string]string{"nvidia.com/gpu":
"0", "ephemeral-storage": "1000", "memory": "1000", "pods": "1000", "vcore":
"1000m"},
+ },
+ }
+
+ for _, tc := range tests {
+ t.Run(tc.name, func(t *testing.T) {
+ // create root
+ root, err := createRootQueue(tc.RootResource)
+ assert.NilError(t, err, "queue create failed")
+
+ // create parent
+ parent, err := createManagedQueue(root, "parent", true,
tc.ParentResource)
+ assert.NilError(t, err, "failed to create 'parent'
queue")
+
+ // create tier0
+ tier0, err := createManagedQueue(parent, "tier0", true,
tc.Tier0Resource)
+ assert.NilError(t, err, "failed to create 'tier0'
queue")
+
+ // create tier1
+ tier1, err := createManagedQueue(parent, "tier1", true,
tc.Tier1Resource)
+ assert.NilError(t, err, "failed to create 'tier1'
queue")
+
+ if tc.Tier0Expectation != nil {
+ actualTier0Max := tier0.GetFairMaxResource()
+ expectedTier0Max, err :=
resources.NewResourceFromConf(tc.Tier0Expectation)
+ assert.NilError(t, err, "failed to create
resource")
+
+ if !resources.Equals(expectedTier0Max,
actualTier0Max) {
+ t.Errorf("root.parent.tier0 queue
expected max %v, got: %v", expectedTier0Max, actualTier0Max)
+ }
+ }
+ if tc.Tier1Expectation != nil {
+ actualTier1Max := tier1.GetFairMaxResource()
+ expectedTier1Max, err :=
resources.NewResourceFromConf(tc.Tier1Expectation)
+ assert.NilError(t, err, "failed to create
resource")
+
+ if !resources.Equals(expectedTier1Max,
actualTier1Max) {
+ t.Errorf("root.parent.tier1 queue
expected max %v, got: %v", expectedTier1Max, actualTier1Max)
+ }
+ }
+ })
+ }
+}
+
func TestGetMaxResource(t *testing.T) {
// create the root
root, err := createRootQueue(nil)
diff --git a/pkg/scheduler/objects/sorters.go b/pkg/scheduler/objects/sorters.go
index 1d7f039b..e677a712 100644
--- a/pkg/scheduler/objects/sorters.go
+++ b/pkg/scheduler/objects/sorters.go
@@ -27,13 +27,13 @@ import (
"github.com/apache/yunikorn-core/pkg/scheduler/policies"
)
-func sortQueue(queues []*Queue, sortType policies.SortPolicy, considerPriority bool) {
+func sortQueue(queues []*Queue, fairMaxResources []*resources.Resource, sortType policies.SortPolicy, considerPriority bool) {
sortingStart := time.Now()
if sortType == policies.FairSortPolicy {
if considerPriority {
- sortQueuesByPriorityAndFairness(queues)
+			sortQueuesByPriorityAndFairness(queues, fairMaxResources)
} else {
- sortQueuesByFairnessAndPriority(queues)
+			sortQueuesByFairnessAndPriority(queues, fairMaxResources)
}
} else {
if considerPriority {
@@ -53,7 +53,7 @@ func sortQueuesByPriority(queues []*Queue) {
})
}
-func sortQueuesByPriorityAndFairness(queues []*Queue) {
+func sortQueuesByPriorityAndFairness(queues []*Queue, fairMaxResources []*resources.Resource) {
sort.SliceStable(queues, func(i, j int) bool {
l := queues[i]
r := queues[j]
@@ -65,8 +65,10 @@ func sortQueuesByPriorityAndFairness(queues []*Queue) {
if lPriority < rPriority {
return false
}
-		comp := resources.CompUsageRatioSeparately(l.GetAllocatedResource(), l.GetGuaranteedResource(),
-			r.GetAllocatedResource(), r.GetGuaranteedResource())
+
+		comp := resources.CompUsageRatioSeparately(l.GetAllocatedResource(), l.GetGuaranteedResource(), fairMaxResources[i],
+			r.GetAllocatedResource(), r.GetGuaranteedResource(), fairMaxResources[j])
+
if comp == 0 {
return
resources.StrictlyGreaterThan(resources.Sub(l.GetPendingResource(),
r.GetPendingResource()), resources.Zero)
}
@@ -74,12 +76,13 @@ func sortQueuesByPriorityAndFairness(queues []*Queue) {
})
}
-func sortQueuesByFairnessAndPriority(queues []*Queue) {
+func sortQueuesByFairnessAndPriority(queues []*Queue, fairMaxResources []*resources.Resource) {
sort.SliceStable(queues, func(i, j int) bool {
l := queues[i]
r := queues[j]
-		comp := resources.CompUsageRatioSeparately(l.GetAllocatedResource(), l.GetGuaranteedResource(),
-			r.GetAllocatedResource(), r.GetGuaranteedResource())
+
+		comp := resources.CompUsageRatioSeparately(l.GetAllocatedResource(), l.GetGuaranteedResource(), fairMaxResources[i],
+			r.GetAllocatedResource(), r.GetGuaranteedResource(), fairMaxResources[j])
if comp == 0 {
lPriority := l.GetCurrentPriority()
rPriority := r.GetCurrentPriority()
diff --git a/pkg/scheduler/objects/sorters_test.go b/pkg/scheduler/objects/sorters_test.go
index 79bf259f..834ab7cf 100644
--- a/pkg/scheduler/objects/sorters_test.go
+++ b/pkg/scheduler/objects/sorters_test.go
@@ -30,13 +30,19 @@ import (
"github.com/apache/yunikorn-core/pkg/scheduler/policies"
)
-// verify queue ordering is working
-func TestSortQueues(t *testing.T) {
+// verify queue ordering is working when explicit guarantees are provided
+func TestSortQueuesWithGuarantees(t *testing.T) {
root, err := createRootQueue(nil)
assert.NilError(t, err, "queue create failed")
var q0, q1, q2, q3 *Queue
var queues []*Queue
+ fairMaxResources :=
[]*resources.Resource{resources.NewResourceFromMap(map[string]resources.Quantity{}),
+ resources.NewResourceFromMap(map[string]resources.Quantity{}),
+ resources.NewResourceFromMap(map[string]resources.Quantity{}),
+ resources.NewResourceFromMap(map[string]resources.Quantity{}),
+ }
+
q0, err = createManagedQueue(root, "q0", false, nil)
assert.NilError(t, err, "failed to create leaf queue")
q0.guaranteedResource =
resources.NewResourceFromMap(map[string]resources.Quantity{"memory": 500,
"vcore": 500})
@@ -63,49 +69,50 @@ func TestSortQueues(t *testing.T) {
// fifo
queues = []*Queue{q0, q1, q2, q3}
- sortQueue(queues, policies.FifoSortPolicy, false)
+
+ sortQueue(queues, fairMaxResources, policies.FifoSortPolicy, false)
assert.Equal(t, queueNames(queues), queueNames([]*Queue{q0, q1, q2,
q3}), "fifo first")
queues = []*Queue{q0, q1, q2, q3}
- sortQueue(queues, policies.FifoSortPolicy, true)
+ sortQueue(queues, fairMaxResources, policies.FifoSortPolicy, true)
assert.Equal(t, queueNames(queues), queueNames([]*Queue{q3, q0, q1,
q2}), "fifo first - priority")
// fifo - different starting order
queues = []*Queue{q1, q3, q0, q2}
- sortQueue(queues, policies.FifoSortPolicy, false)
+ sortQueue(queues, fairMaxResources, policies.FifoSortPolicy, false)
assert.Equal(t, queueNames(queues), queueNames([]*Queue{q1, q3, q0,
q2}), "fifo second")
queues = []*Queue{q1, q3, q0, q2}
- sortQueue(queues, policies.FifoSortPolicy, true)
+ sortQueue(queues, fairMaxResources, policies.FifoSortPolicy, true)
assert.Equal(t, queueNames(queues), queueNames([]*Queue{q3, q1, q0,
q2}), "fifo second - priority")
// fairness ratios: q0:300/500=0.6, q1:200/300=0.67, q2:100/200=0.5,
q3:100/200=0.5
queues = []*Queue{q0, q1, q2, q3}
- sortQueue(queues, policies.FairSortPolicy, false)
+ sortQueue(queues, fairMaxResources, policies.FairSortPolicy, false)
assert.Equal(t, queueNames(queues), queueNames([]*Queue{q3, q2, q0,
q1}), "fair first")
queues = []*Queue{q0, q1, q2, q3}
- sortQueue(queues, policies.FairSortPolicy, true)
+ sortQueue(queues, fairMaxResources, policies.FairSortPolicy, true)
assert.Equal(t, queueNames(queues), queueNames([]*Queue{q3, q2, q0,
q1}), "fair first - priority")
// fairness ratios: q0:200/500=0.4, q1:300/300=1, q2:100/200=0.5,
q3:100/200=0.5
q0.allocatedResource =
resources.NewResourceFromMap(map[string]resources.Quantity{"memory": 200,
"vcore": 200})
q1.allocatedResource =
resources.NewResourceFromMap(map[string]resources.Quantity{"memory": 300,
"vcore": 300})
queues = []*Queue{q0, q1, q2, q3}
- sortQueue(queues, policies.FairSortPolicy, false)
+ sortQueue(queues, fairMaxResources, policies.FairSortPolicy, false)
assert.Equal(t, queueNames(queues), queueNames([]*Queue{q0, q3, q2,
q1}), "fair second")
queues = []*Queue{q0, q1, q2, q3}
- sortQueue(queues, policies.FairSortPolicy, true)
+ sortQueue(queues, fairMaxResources, policies.FairSortPolicy, true)
assert.Equal(t, queueNames(queues), queueNames([]*Queue{q3, q0, q2,
q1}), "fair second - priority")
// fairness ratios: q0:150/500=0.3, q1:120/300=0.4, q2:100/200=0.5,
q3:100/200=0.5
q0.allocatedResource =
resources.NewResourceFromMap(map[string]resources.Quantity{"memory": 150,
"vcore": 150})
q1.allocatedResource =
resources.NewResourceFromMap(map[string]resources.Quantity{"memory": 120,
"vcore": 120})
queues = []*Queue{q0, q1, q2, q3}
- sortQueue(queues, policies.FairSortPolicy, false)
+ sortQueue(queues, fairMaxResources, policies.FairSortPolicy, false)
assert.Equal(t, queueNames(queues), queueNames([]*Queue{q0, q1, q3,
q2}), "fair third")
queues = []*Queue{q0, q1, q2, q3}
- sortQueue(queues, policies.FairSortPolicy, true)
+ sortQueue(queues, fairMaxResources, policies.FairSortPolicy, true)
assert.Equal(t, queueNames(queues), queueNames([]*Queue{q3, q0, q1,
q2}), "fair third - priority")
// fairness ratios: q0:400/800=0.5, q1:200/400= 0.5, q2:100/200=0.5,
q3:100/200=0.5
@@ -114,12 +121,12 @@ func TestSortQueues(t *testing.T) {
q1.guaranteedResource =
resources.NewResourceFromMap(map[string]resources.Quantity{"memory": 400,
"vcore": 300})
q1.allocatedResource =
resources.NewResourceFromMap(map[string]resources.Quantity{"memory": 200,
"vcore": 150})
queues = []*Queue{q0, q1, q2, q3}
- sortQueue(queues, policies.FairSortPolicy, false)
+ sortQueue(queues, fairMaxResources, policies.FairSortPolicy, false)
assert.Equal(t, queueNames(queues), queueNames([]*Queue{q3, q0, q1,
q2}), "fair - pending resource")
}
// queue guaranteed resource is not set (same as a zero resource)
-func TestNoQueueLimits(t *testing.T) {
+func TestSortQueuesNoGuarantees(t *testing.T) {
root, err := createRootQueue(nil)
assert.NilError(t, err, "queue create failed")
@@ -145,16 +152,24 @@ func TestNoQueueLimits(t *testing.T) {
q3.currentPriority = 3
queues := []*Queue{q0, q1, q2, q3}
- sortQueue(queues, policies.FairSortPolicy, false)
- assert.Equal(t, queueNames(queues), queueNames([]*Queue{q3, q2, q1,
q0}), "fair no limit first")
- sortQueue(queues, policies.FairSortPolicy, true)
- assert.Equal(t, queueNames(queues), queueNames([]*Queue{q3, q2, q0,
q1}), "fair no limit first - priority")
+ fairMaxResources :=
[]*resources.Resource{resources.NewResourceFromMap(map[string]resources.Quantity{"memory":
1000, "vcore": 1000}),
+
resources.NewResourceFromMap(map[string]resources.Quantity{"memory": 1000,
"vcore": 1000}),
+
resources.NewResourceFromMap(map[string]resources.Quantity{"memory": 1000,
"vcore": 1000}),
+
resources.NewResourceFromMap(map[string]resources.Quantity{"memory": 1000,
"vcore": 1000}),
+ }
+ sortQueue(queues, fairMaxResources, policies.FairSortPolicy, false)
+	assert.Equal(t, queueNames(queues), queueNames([]*Queue{q3, q2, q1, q0}), "fair no guarantees first")
+
+ sortQueue(queues, fairMaxResources, policies.FairSortPolicy, true)
+	assert.Equal(t, queueNames(queues), queueNames([]*Queue{q3, q2, q0, q1}), "fair no guarantees first - priority")
q0.allocatedResource =
resources.NewResourceFromMap(map[string]resources.Quantity{"memory": 200,
"vcore": 200})
q1.allocatedResource =
resources.NewResourceFromMap(map[string]resources.Quantity{"memory": 300,
"vcore": 300})
- sortQueue(queues, policies.FairSortPolicy, false)
- assert.Equal(t, queueNames(queues), queueNames([]*Queue{q3, q2, q0,
q1}), "fair no limit second")
- sortQueue(queues, policies.FairSortPolicy, true)
+
+ sortQueue(queues, fairMaxResources, policies.FairSortPolicy, false)
+	assert.Equal(t, queueNames(queues), queueNames([]*Queue{q3, q2, q0, q1}), "fair no guarantees second")
+
+ sortQueue(queues, fairMaxResources, policies.FairSortPolicy, true)
assert.Equal(t, queueNames(queues), queueNames([]*Queue{q3, q2, q0,
q1}), "fair no limit second - priority")
}
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]