This is an automated email from the ASF dual-hosted git repository.
dubeejw pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-openwhisk.git
The following commit(s) were added to refs/heads/master by this push:
new a248348 Make fraction of managed invokers configurable. (#4220)
a248348 is described below
commit a248348349f0e8f20322d3efe8ca4f9b5097cce4
Author: Christian Bickel <[email protected]>
AuthorDate: Wed Jan 23 17:01:51 2019 +0100
Make fraction of managed invokers configurable. (#4220)
* Make fraction of managed invokers configurable.
This PR makes the fraction of the pool of managed invokers configurable.
This also allows the pool of blackbox invokers and
the pool of managed invokers to overlap.
The proposal has been discussed here:
https://lists.apache.org/thread.html/508e10ec9e800bb239363861385818c95d90ad1aa7df64f3b8904580@%3Cdev.openwhisk.apache.org%3E
---
ansible/group_vars/all | 6 ++++--
ansible/roles/controller/tasks/deploy.yml | 2 ++
core/controller/src/main/resources/reference.conf | 1 +
.../loadBalancer/ShardingContainerPoolBalancer.scala | 16 ++++++++++++----
.../test/ShardingContainerPoolBalancerTests.scala | 17 +++++++++++++++--
5 files changed, 34 insertions(+), 8 deletions(-)
diff --git a/ansible/group_vars/all b/ansible/group_vars/all
index 9aeb5a5..a806200 100755
--- a/ansible/group_vars/all
+++ b/ansible/group_vars/all
@@ -60,6 +60,7 @@ limits:
# Moved here to avoid recursions. Please do not use outside of controller-dict.
__controller_ssl_keyPrefix: "controller-"
+__controller_blackbox_fraction: 0.10
# port means outer port
controller:
@@ -69,7 +70,8 @@ controller:
basePort: 10001
heap: "{{ controller_heap | default('2g') }}"
arguments: "{{ controller_arguments | default('') }}"
- blackboxFraction: "{{ controller_blackbox_fraction | default(0.10) }}"
+ managedFraction: "{{ controller_managed_fraction | default(1.0 -
(controller_blackbox_fraction | default(__controller_blackbox_fraction))) }}"
+ blackboxFraction: "{{ controller_blackbox_fraction |
default(__controller_blackbox_fraction) }}"
timeoutFactor: "{{ controller_timeout_factor | default(2) }}"
instances: "{{ groups['controllers'] | length }}"
localBookkeeping: "{{ controller_local_bookkeeping | default('false') }}"
@@ -264,7 +266,7 @@ apigateway:
redis:
version: 4.0
port: 6379
- password: openwhisk
+ password: openwhisk
linux:
version: 4.4.0-31
diff --git a/ansible/roles/controller/tasks/deploy.yml
b/ansible/roles/controller/tasks/deploy.yml
index b1b3a0e..e13b6f9 100644
--- a/ansible/roles/controller/tasks/deploy.yml
+++ b/ansible/roles/controller/tasks/deploy.yml
@@ -227,6 +227,8 @@
"{{ controller.ssl.storeFlavor }}"
"CONFIG_whisk_controller_https_clientAuth":
"{{ controller.ssl.clientAuth }}"
+ "CONFIG_whisk_loadbalancer_managedFraction":
+ "{{ controller.managedFraction }}"
"CONFIG_whisk_loadbalancer_blackboxFraction":
"{{ controller.blackboxFraction }}"
"CONFIG_whisk_loadbalancer_timeoutFactor":
diff --git a/core/controller/src/main/resources/reference.conf
b/core/controller/src/main/resources/reference.conf
index 3f76e0c..647875a 100644
--- a/core/controller/src/main/resources/reference.conf
+++ b/core/controller/src/main/resources/reference.conf
@@ -6,6 +6,7 @@ whisk {
use-cluster-bootstrap: false
}
loadbalancer {
+ managed-fraction: 90%
blackbox-fraction: 10%
# factor to increase the timeout for forced active acks
# timeout = time-limit.std * timeoutfactor + 1m
diff --git
a/core/controller/src/main/scala/org/apache/openwhisk/core/loadBalancer/ShardingContainerPoolBalancer.scala
b/core/controller/src/main/scala/org/apache/openwhisk/core/loadBalancer/ShardingContainerPoolBalancer.scala
index e9038cf..4d24725 100644
---
a/core/controller/src/main/scala/org/apache/openwhisk/core/loadBalancer/ShardingContainerPoolBalancer.scala
+++
b/core/controller/src/main/scala/org/apache/openwhisk/core/loadBalancer/ShardingContainerPoolBalancer.scala
@@ -617,8 +617,16 @@ case class ShardingContainerPoolBalancerState(
lbConfig: ShardingContainerPoolBalancerConfig =
loadConfigOrThrow[ShardingContainerPoolBalancerConfig](ConfigKeys.loadbalancer))(implicit
logging: Logging) {
- private val blackboxFraction: Double = Math.max(0.0, Math.min(1.0,
lbConfig.blackboxFraction))
- logging.info(this, s"blackboxFraction =
$blackboxFraction")(TransactionId.loadbalancer)
+ // Managed fraction and blackbox fraction can each be between 0.0 and 1.0. The
sum of these two fractions has to be between
+ // 1.0 and 2.0.
+ // If the sum is 1.0, there is no overlap between blackbox and
managed invokers. If the sum is 2.0,
+ // there is no differentiation between managed and blackbox
invokers.
+ // If the sum is below 1.0 with the initial values from config, the blackbox
fraction will be set higher than
+ // specified in config and adapted to the managed fraction.
+ private val managedFraction: Double = Math.max(0.0, Math.min(1.0,
lbConfig.managedFraction))
+ private val blackboxFraction: Double = Math.max(1.0 - managedFraction,
Math.min(1.0, lbConfig.blackboxFraction))
+ logging.info(this, s"managedFraction = $managedFraction, blackboxFraction =
$blackboxFraction")(
+ TransactionId.loadbalancer)
/** Getters for the variables, setting from the outside is only allowed
through the update methods below */
def invokers: IndexedSeq[InvokerHealth] = _invokers
@@ -663,7 +671,7 @@ case class ShardingContainerPoolBalancerState(
// for small N, allow the managed invokers to overlap with blackbox
invokers, and
// further assume that blackbox invokers << managed invokers
- val managed = Math.max(1, Math.ceil(newSize.toDouble * (1 -
blackboxFraction)).toInt)
+ val managed = Math.max(1, Math.ceil(newSize.toDouble *
managedFraction).toInt)
val blackboxes = Math.max(1, Math.floor(newSize.toDouble *
blackboxFraction).toInt)
_invokers = newInvokers
@@ -721,7 +729,7 @@ case class ClusterConfig(useClusterBootstrap: Boolean)
* @param blackboxFraction the fraction of all invokers to use exclusively for
blackboxes
* @param timeoutFactor factor to influence the timeout period for forced
active acks (time-limit.std * timeoutFactor + 1m)
*/
-case class ShardingContainerPoolBalancerConfig(blackboxFraction: Double,
timeoutFactor: Int)
+case class ShardingContainerPoolBalancerConfig(managedFraction: Double,
blackboxFraction: Double, timeoutFactor: Int)
/**
* State kept for each activation slot until completion.
diff --git
a/tests/src/test/scala/org/apache/openwhisk/core/loadBalancer/test/ShardingContainerPoolBalancerTests.scala
b/tests/src/test/scala/org/apache/openwhisk/core/loadBalancer/test/ShardingContainerPoolBalancerTests.scala
index 2c34b01..da166e7 100644
---
a/tests/src/test/scala/org/apache/openwhisk/core/loadBalancer/test/ShardingContainerPoolBalancerTests.scala
+++
b/tests/src/test/scala/org/apache/openwhisk/core/loadBalancer/test/ShardingContainerPoolBalancerTests.scala
@@ -95,8 +95,8 @@ class ShardingContainerPoolBalancerTests
def semaphores(count: Int, max: Int):
IndexedSeq[NestedSemaphore[FullyQualifiedEntityName]] =
IndexedSeq.fill(count)(new NestedSemaphore[FullyQualifiedEntityName](max))
- def lbConfig(blackboxFraction: Double) =
- ShardingContainerPoolBalancerConfig(blackboxFraction, 1)
+ def lbConfig(blackboxFraction: Double, managedFraction: Option[Double] =
None) =
+ ShardingContainerPoolBalancerConfig(managedFraction.getOrElse(1.0 -
blackboxFraction), blackboxFraction, 1)
it should "update invoker's state, growing the slots data and keeping valid
old data" in {
// start empty
@@ -170,6 +170,19 @@ class ShardingContainerPoolBalancerTests
}
}
+ it should "return the same pools if managed- and blackbox-pools are
overlapping" in {
+
+ val state = ShardingContainerPoolBalancerState()(lbConfig(1.0, Some(1.0)))
+ (1 to 100).foreach { i =>
+ state.updateInvokers((1 to i).map(_ => healthy(1,
MemoryLimit.stdMemory)))
+ }
+
+ state.managedInvokers should have size 100
+ state.blackboxInvokers should have size 100
+
+ state.managedInvokers shouldBe state.blackboxInvokers
+ }
+
it should "update the cluster size, adjusting the invoker slots accordingly"
in {
val slots = 10
val memoryPerSlot = MemoryLimit.minMemory