[incubator-openwhisk] branch master updated: Send system overload metric from Controller. (#4131)

2018-11-23 Thread cbickel
This is an automated email from the ASF dual-hosted git repository.

cbickel pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-openwhisk.git


The following commit(s) were added to refs/heads/master by this push:
 new fbb37d3  Send system overload metric from Controller. (#4131)
fbb37d3 is described below

commit fbb37d3760322794abe951ef9632ac01ff1be497
Author: Su 
AuthorDate: Fri Nov 23 14:52:25 2018 +0100

Send system overload metric from Controller. (#4131)
---
 common/scala/src/main/scala/org/apache/openwhisk/common/Logging.scala   | 2 ++
 .../openwhisk/core/loadBalancer/ShardingContainerPoolBalancer.scala | 1 +
 2 files changed, 3 insertions(+)

diff --git 
a/common/scala/src/main/scala/org/apache/openwhisk/common/Logging.scala 
b/common/scala/src/main/scala/org/apache/openwhisk/common/Logging.scala
index 902bccc..3bc3597 100644
--- a/common/scala/src/main/scala/org/apache/openwhisk/common/Logging.scala
+++ b/common/scala/src/main/scala/org/apache/openwhisk/common/Logging.scala
@@ -271,6 +271,8 @@ object LoggingMarkers {
   // Time that is needed to produce message in kafka
   val CONTROLLER_KAFKA = LogMarkerToken(controller, kafka, start)
 
+  // System overload and random invoker assignment
+  val SYSTEM_OVERLOAD = LogMarkerToken(controller, "systemOverload", count)
   /*
* Invoker related markers
*/
diff --git 
a/core/controller/src/main/scala/org/apache/openwhisk/core/loadBalancer/ShardingContainerPoolBalancer.scala
 
b/core/controller/src/main/scala/org/apache/openwhisk/core/loadBalancer/ShardingContainerPoolBalancer.scala
index 15dbf2e..5348d37 100644
--- 
a/core/controller/src/main/scala/org/apache/openwhisk/core/loadBalancer/ShardingContainerPoolBalancer.scala
+++ 
b/core/controller/src/main/scala/org/apache/openwhisk/core/loadBalancer/ShardingContainerPoolBalancer.scala
@@ -582,6 +582,7 @@ object ShardingContainerPoolBalancer extends 
LoadBalancerProvider {
 val random = 
healthyInvokers(ThreadLocalRandom.current().nextInt(healthyInvokers.size)).id
 dispatched(random.toInt).forceAcquireConcurrent(fqn, 
maxConcurrent, slots)
 logging.warn(this, s"system is overloaded. Chose 
invoker${random.toInt} by random assignment.")
+MetricEmitter.emitCounterMetric(LoggingMarkers.SYSTEM_OVERLOAD)
 Some(random)
   } else {
 None



[incubator-openwhisk] branch master updated: Reduce invoker health action memory limit to the minimum available memory limit. (#4136)

2018-11-27 Thread cbickel
This is an automated email from the ASF dual-hosted git repository.

cbickel pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-openwhisk.git


The following commit(s) were added to refs/heads/master by this push:
 new a183b3e  Reduce invoker health action memory limit to the minimum 
available memory limit. (#4136)
a183b3e is described below

commit a183b3e17daa18625476d31629385e130c5a01ed
Author: Su 
AuthorDate: Wed Nov 28 08:08:21 2018 +0100

Reduce invoker health action memory limit to the minimum available memory 
limit. (#4136)
---
 .../org/apache/openwhisk/core/loadBalancer/InvokerSupervision.scala| 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git 
a/core/controller/src/main/scala/org/apache/openwhisk/core/loadBalancer/InvokerSupervision.scala
 
b/core/controller/src/main/scala/org/apache/openwhisk/core/loadBalancer/InvokerSupervision.scala
index 5017ea3..736a6ab 100644
--- 
a/core/controller/src/main/scala/org/apache/openwhisk/core/loadBalancer/InvokerSupervision.scala
+++ 
b/core/controller/src/main/scala/org/apache/openwhisk/core/loadBalancer/InvokerSupervision.scala
@@ -276,7 +276,8 @@ object InvokerPool {
   new WhiskAction(
 namespace = healthActionIdentity.namespace.name.toPath,
 name = EntityName(s"invokerHealthTestAction${i.asString}"),
-exec = CodeExecAsString(manifest, """function main(params) { return 
params; }""", None))
+exec = CodeExecAsString(manifest, """function main(params) { return 
params; }""", None),
+limits = ActionLimits(memory = MemoryLimit(MemoryLimit.minMemory)))
 }
 }
 



[incubator-openwhisk] branch master updated: Ensure ResultMessage is processed. (#4135)

2018-11-28 Thread cbickel
This is an automated email from the ASF dual-hosted git repository.

cbickel pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-openwhisk.git


The following commit(s) were added to refs/heads/master by this push:
 new 7f571c3  Ensure ResultMessage is processed. (#4135)
7f571c3 is described below

commit 7f571c32bb8f3155c89f1d96fda4320909e097fd
Author: jiangpch 
AuthorDate: Thu Nov 29 15:08:48 2018 +0800

Ensure ResultMessage is processed. (#4135)
---
 .../ShardingContainerPoolBalancer.scala| 29 ++
 1 file changed, 13 insertions(+), 16 deletions(-)

diff --git 
a/core/controller/src/main/scala/org/apache/openwhisk/core/loadBalancer/ShardingContainerPoolBalancer.scala
 
b/core/controller/src/main/scala/org/apache/openwhisk/core/loadBalancer/ShardingContainerPoolBalancer.scala
index 35a4547..4010cc1 100644
--- 
a/core/controller/src/main/scala/org/apache/openwhisk/core/loadBalancer/ShardingContainerPoolBalancer.scala
+++ 
b/core/controller/src/main/scala/org/apache/openwhisk/core/loadBalancer/ShardingContainerPoolBalancer.scala
@@ -175,6 +175,8 @@ class ShardingContainerPoolBalancer(
 
   /** State related to invocations and throttling */
   protected[loadBalancer] val activations = TrieMap[ActivationId, 
ActivationEntry]()
+  protected[loadBalancer] val blockingPromises =
+TrieMap[ActivationId, Promise[Either[ActivationId, WhiskActivation]]]()
   private val activationsPerNamespace = TrieMap[UUID, LongAdder]()
   private val totalActivations = new LongAdder()
   private val totalActivationMemory = new LongAdder()
@@ -262,9 +264,13 @@ class ShardingContainerPoolBalancer(
 
 chosen
   .map { invoker =>
-val entry = setupActivation(msg, action, invoker)
+setupActivation(msg, action, invoker)
 sendActivationToInvoker(messageProducer, msg, invoker).map { _ =>
-  entry.promise.future
+  if (msg.blocking) {
+blockingPromises.getOrElseUpdate(msg.activationId, 
Promise[Either[ActivationId, WhiskActivation]]()).future
+  } else {
+Future.successful(Left(msg.activationId))
+  }
 }
   }
   .getOrElse {
@@ -313,8 +319,7 @@ class ShardingContainerPoolBalancer(
   action.limits.memory.megabytes.MB,
   action.limits.concurrency.maxConcurrent,
   action.fullyQualifiedName(true),
-  timeoutHandler,
-  Promise[Either[ActivationId, WhiskActivation]]())
+  timeoutHandler)
   })
   }
 
@@ -387,9 +392,7 @@ class ShardingContainerPoolBalancer(
 // Resolve the promise to send the result back to the user
 // The activation will be removed from `activations`-map later, when we 
receive the completion message, because the
 // slot of the invoker is not yet free for new activations.
-activations.get(aid).map { entry =>
-  entry.promise.trySuccess(response)
-}
+blockingPromises.remove(aid).map(_.trySuccess(response))
 logging.info(this, s"received result ack for '$aid'")(tid)
   }
 
@@ -422,13 +425,9 @@ class ShardingContainerPoolBalancer(
   .foreach(_.releaseConcurrent(entry.fullyQualifiedEntityName, 
entry.maxConcurrent, entry.memory.toMB.toInt))
 if (!forced) {
   entry.timeoutHandler.cancel()
-  // If the action was blocking and the Resultmessage has been 
received before nothing will happen here.
-  // If the action was blocking and the ResultMessage is still 
missing, we pass the ActivationId. With this Id,
-  // the controller will get the result out of the database.
-  // If the action was non-blocking, we will close the promise here.
-  entry.promise.trySuccess(Left(aid))
 } else {
-  entry.promise.tryFailure(new Throwable("no completion ack received"))
+  // remove blocking promise when timeout, if the ResultMessage is 
already processed, this will do nothing
+  blockingPromises.remove(aid).foreach(_.tryFailure(new Throwable("no 
completion ack received")))
 }
 
 logging.info(this, s"${if (!forced) "received" else "forced"} 
completion ack for '$aid'")(tid)
@@ -717,7 +716,6 @@ case class 
ShardingContainerPoolBalancerConfig(blackboxFraction: Double, timeout
  * @param namespaceId namespace that invoked the action
  * @param invokerName invoker the action is scheduled to
  * @param timeoutHandler times out completion of this activation, should be 
canceled on good paths
- * @param promise the promise to be completed by the activation
  */
 case class ActivationEntry(id: ActivationId,
namespaceId: UUID,
@@ -725,5 +723,4 @@ case class ActivationEntry(id: ActivationId,
memory: ByteSize,
maxConcurrent: Int,
fullyQualifiedEnti

[incubator-openwhisk] branch master updated: Record the blocking activation in the proper map before the request is sent to the invoker. (#4145)

2018-12-05 Thread cbickel
This is an automated email from the ASF dual-hosted git repository.

cbickel pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-openwhisk.git


The following commit(s) were added to refs/heads/master by this push:
 new 54a2f22  Record the blocking activation in the proper map before the 
request is sent to the invoker. (#4145)
54a2f22 is described below

commit 54a2f228b744f88cfe3186b10f00e9cb80309886
Author: rodric rabbah 
AuthorDate: Thu Dec 6 02:22:40 2018 -0500

Record the blocking activation in the proper map before the request is sent 
to the invoker. (#4145)
---
 .../ShardingContainerPoolBalancer.scala| 57 +-
 .../test/ShardingContainerPoolBalancerTests.scala  |  2 +-
 2 files changed, 36 insertions(+), 23 deletions(-)

diff --git 
a/core/controller/src/main/scala/org/apache/openwhisk/core/loadBalancer/ShardingContainerPoolBalancer.scala
 
b/core/controller/src/main/scala/org/apache/openwhisk/core/loadBalancer/ShardingContainerPoolBalancer.scala
index 4010cc1..5ddcd11 100644
--- 
a/core/controller/src/main/scala/org/apache/openwhisk/core/loadBalancer/ShardingContainerPoolBalancer.scala
+++ 
b/core/controller/src/main/scala/org/apache/openwhisk/core/loadBalancer/ShardingContainerPoolBalancer.scala
@@ -174,8 +174,8 @@ class ShardingContainerPoolBalancer(
   }
 
   /** State related to invocations and throttling */
-  protected[loadBalancer] val activations = TrieMap[ActivationId, 
ActivationEntry]()
-  protected[loadBalancer] val blockingPromises =
+  protected[loadBalancer] val activationSlots = TrieMap[ActivationId, 
ActivationEntry]()
+  protected[loadBalancer] val activationPromises =
 TrieMap[ActivationId, Promise[Either[ActivationId, WhiskActivation]]]()
   private val activationsPerNamespace = TrieMap[UUID, LongAdder]()
   private val totalActivations = new LongAdder()
@@ -264,14 +264,8 @@ class ShardingContainerPoolBalancer(
 
 chosen
   .map { invoker =>
-setupActivation(msg, action, invoker)
-sendActivationToInvoker(messageProducer, msg, invoker).map { _ =>
-  if (msg.blocking) {
-blockingPromises.getOrElseUpdate(msg.activationId, 
Promise[Either[ActivationId, WhiskActivation]]()).future
-  } else {
-Future.successful(Left(msg.activationId))
-  }
-}
+val activationResult = setupActivation(msg, action, invoker)
+sendActivationToInvoker(messageProducer, msg, invoker).map(_ => 
activationResult)
   }
   .getOrElse {
 // report the state of all invokers
@@ -286,10 +280,17 @@ class ShardingContainerPoolBalancer(
   }
   }
 
-  /** 2. Update local state with the to be executed activation */
+  /**
+   * 2. Update local state with the to be executed activation.
+   *
+   * All activations are tracked in the activationSlots map. Additionally, 
blocking invokes
+   * are tracked in the activation results map. When a result is received via 
activeack, it
+   * will cause the result to be forwarded to the caller waiting on the 
result, and cancel
+   * the DB poll which is also trying to do the same.
+   */
   private def setupActivation(msg: ActivationMessage,
   action: ExecutableWhiskActionMetaData,
-  instance: InvokerInstanceId): ActivationEntry = {
+  instance: InvokerInstanceId): 
Future[Either[ActivationId, WhiskActivation]] = {
 
 totalActivations.increment()
 totalActivationMemory.add(action.limits.memory.megabytes)
@@ -301,11 +302,15 @@ class ShardingContainerPoolBalancer(
 // to allow in your topics before you start reporting failed activations.
 val timeout = (action.limits.timeout.duration.max(TimeLimit.STD_DURATION) 
* lbConfig.timeoutFactor) + 1.minute
 
+val resultPromise = if (msg.blocking) {
+  activationPromises.getOrElseUpdate(msg.activationId, 
Promise[Either[ActivationId, WhiskActivation]]()).future
+} else Future.successful(Left(msg.activationId))
+
 // Install a timeout handler for the catastrophic case where an active ack 
is not received at all
 // (because say an invoker is down completely, or the connection to the 
message bus is disrupted) or when
-// the completion ack is significantly delayed (possibly dues to long 
queues but the subject should not be penalized);
+// the active ack is significantly delayed (possibly dues to long queues 
but the subject should not be penalized);
 // in this case, if the activation handler is still registered, remove it 
and update the books.
-activations.getOrElseUpdate(
+activationSlots.getOrElseUpdate(
   msg.activationId, {
 val timeoutHandler = actorSystem.scheduler.scheduleOnce(timeout) {
   processCompletion(msg.activationId, msg.transid, forced = true, 
isSystemError = false, invoker = instance)
@@ -321,6 +326,8 @@ class ShardingContainerPoo

[incubator-openwhisk] branch master updated: Send capacity, system overload metrics for managed and blackbox invokers separately. (#4219)

2019-01-29 Thread cbickel
This is an automated email from the ASF dual-hosted git repository.

cbickel pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-openwhisk.git


The following commit(s) were added to refs/heads/master by this push:
 new 8cd10bb  Send capacity, system overload metrics for managed and 
blackbox invokers separately. (#4219)
8cd10bb is described below

commit 8cd10bb09d64c1bc2638f05d1fb8dadb24b7f36e
Author: Su 
AuthorDate: Tue Jan 29 13:14:43 2019 +0100

Send capacity, system overload metrics for managed and blackbox invokers 
separately. (#4219)

To send metrics on system overload condition, capacity in flight v/s total 
and count of healthy, unhealthy, unresponsive and down invokers (managed and 
blackbox separately) in order to visualise it as graph on Metrics dashboard.
---
 .../org/apache/openwhisk/common/Logging.scala  |  22 -
 .../ShardingContainerPoolBalancer.scala| 103 +
 .../test/ShardingContainerPoolBalancerTests.scala  |  52 ---
 3 files changed, 143 insertions(+), 34 deletions(-)

diff --git 
a/common/scala/src/main/scala/org/apache/openwhisk/common/Logging.scala 
b/common/scala/src/main/scala/org/apache/openwhisk/common/Logging.scala
index c45cb3f..1eb0083 100644
--- a/common/scala/src/main/scala/org/apache/openwhisk/common/Logging.scala
+++ b/common/scala/src/main/scala/org/apache/openwhisk/common/Logging.scala
@@ -335,14 +335,15 @@ object LoggingMarkers {
   val CONTROLLER_ACTIVATION_BLOCKING_DATABASE_RETRIEVAL =
 LogMarkerToken(controller, "blockingActivationDatabaseRetrieval", count)
 
-  // Time that is needed load balance the activation
+  // Time that is needed to load balance the activation
   val CONTROLLER_LOADBALANCER = LogMarkerToken(controller, loadbalancer, start)
 
   // Time that is needed to produce message in kafka
   val CONTROLLER_KAFKA = LogMarkerToken(controller, kafka, start)
 
   // System overload and random invoker assignment
-  val SYSTEM_OVERLOAD = LogMarkerToken(controller, "systemOverload", count)
+  val MANAGED_SYSTEM_OVERLOAD = LogMarkerToken(controller, 
"managedInvokerSystemOverload", count)
+  val BLACKBOX_SYSTEM_OVERLOAD = LogMarkerToken(controller, 
"blackBoxInvokerSystemOverload", count)
   /*
* Invoker related markers
*/
@@ -355,8 +356,8 @@ object LoggingMarkers {
 
   def LOADBALANCER_ACTIVATIONS_INFLIGHT(controllerInstance: 
ControllerInstanceId) =
 LogMarkerToken(loadbalancer + controllerInstance.asString, 
"activationsInflight", count)
-  def LOADBALANCER_MEMORY_INFLIGHT(controllerInstance: ControllerInstanceId) =
-LogMarkerToken(loadbalancer + controllerInstance.asString, 
"memoryInflight", count)
+  def LOADBALANCER_MEMORY_INFLIGHT(controllerInstance: ControllerInstanceId, 
actionType: String) =
+LogMarkerToken(loadbalancer + controllerInstance.asString, 
s"memory${actionType}Inflight", count)
 
   // Time that is needed to execute the action
   val INVOKER_ACTIVATION_RUN = LogMarkerToken(invoker, "activationRun", start)
@@ -383,6 +384,19 @@ object LoggingMarkers {
   val CONTAINER_CLIENT_RETRIES =
 LogMarkerToken(containerClient, "retries", count)
 
+  val INVOKER_TOTALMEM_BLACKBOX = LogMarkerToken(loadbalancer, 
"totalCapacityBlackBox", count)
+  val INVOKER_TOTALMEM_MANAGED = LogMarkerToken(loadbalancer, 
"totalCapacityManaged", count)
+
+  val HEALTHY_INVOKER_MANAGED = LogMarkerToken(loadbalancer, 
"totalHealthyInvokerManaged", count)
+  val UNHEALTHY_INVOKER_MANAGED = LogMarkerToken(loadbalancer, 
"totalUnhealthyInvokerManaged", count)
+  val UNRESPONSIVE_INVOKER_MANAGED = LogMarkerToken(loadbalancer, 
"totalUnresponsiveInvokerManaged", count)
+  val OFFLINE_INVOKER_MANAGED = LogMarkerToken(loadbalancer, 
"totalOfflineInvokerManaged", count)
+
+  val HEALTHY_INVOKER_BLACKBOX = LogMarkerToken(loadbalancer, 
"totalHealthyInvokerBlackBox", count)
+  val UNHEALTHY_INVOKER_BLACKBOX = LogMarkerToken(loadbalancer, 
"totalUnhealthyInvokerBlackBox", count)
+  val UNRESPONSIVE_INVOKER_BLACKBOX = LogMarkerToken(loadbalancer, 
"totalUnresponsiveInvokerBlackBox", count)
+  val OFFLINE_INVOKER_BLACKBOX = LogMarkerToken(loadbalancer, 
"totalOfflineInvokerBlackBox", count)
+
   // Kafka related markers
   def KAFKA_QUEUE(topic: String) = LogMarkerToken(kafka, topic, count)
   def KAFKA_MESSAGE_DELAY(topic: String) = LogMarkerToken(kafka, topic, start, 
Some("delay"))
diff --git 
a/core/controller/src/main/scala/org/apache/openwhisk/core/loadBalancer/ShardingContainerPoolBalancer.scala
 
b/core/controller/src/main/scala/org/apache/openwhisk/core/loadBalancer/ShardingContainerPoolBalancer.scala
index 4d24725..914b3ac 100644
--- 
a/core/controller/src/main/scala/org/apache/openwhisk/core/loadBalancer/ShardingContainerPool

[incubator-openwhisk] branch master updated: Revert back to the previous version, as the new base image causes regression in performance tests. (#4353)

2019-03-18 Thread cbickel
This is an automated email from the ASF dual-hosted git repository.

cbickel pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-openwhisk.git


The following commit(s) were added to refs/heads/master by this push:
 new 58e3ddd  Revert back to the previous version, as the new base image 
causes regression in performance tests. (#4353)
58e3ddd is described below

commit 58e3ddd6229f3fdf9ed3cf54708175efda0d6494
Author: Somaya Jamil 
AuthorDate: Mon Mar 18 15:43:40 2019 +0100

Revert back to the previous version, as the new base image causes 
regression in performance tests. (#4353)

Reverting back to adoptopenjdk/openjdk8:x86_64-alpine-jdk8u172-b11 from 
adoptopenjdk/openjdk8:x86_64-alpine-jdk8u202-b08 (commit) as we see regression 
in Latency.

After briefly debugging, we found that the time was lost between 
Controller->Invoker & Invoker->Controller (as reverting version already helps 
to improve the problem there seems to be an issue with the communication with 
kafka). We didn't find a quick solution so reverting back to the old version, 
and in the meanwhile will continue to look for the root cause.
---
 common/scala/Dockerfile | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/common/scala/Dockerfile b/common/scala/Dockerfile
index 9505c1d..2776d94 100644
--- a/common/scala/Dockerfile
+++ b/common/scala/Dockerfile
@@ -1,7 +1,7 @@
 # Licensed to the Apache Software Foundation (ASF) under one or more 
contributor
 # license agreements; and to You under the Apache License, Version 2.0.
 
-FROM adoptopenjdk/openjdk8:x86_64-alpine-jdk8u202-b08
+FROM adoptopenjdk/openjdk8:x86_64-alpine-jdk8u172-b11
 
 ENV LANG en_US.UTF-8
 ENV LANGUAGE en_US:en



[incubator-openwhisk] branch master updated: Make limit overcommit relative to the actual cluster size. (#3592)

2018-05-03 Thread cbickel
This is an automated email from the ASF dual-hosted git repository.

cbickel pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-openwhisk.git


The following commit(s) were added to refs/heads/master by this push:
 new d3a0bb4  Make limit overcommit relative to the actual cluster size. 
(#3592)
d3a0bb4 is described below

commit d3a0bb48faf82a668ad9dc7eecf6f9b630c6189c
Author: Markus Thömmes 
AuthorDate: Thu May 3 13:08:25 2018 +0200

Make limit overcommit relative to the actual cluster size. (#3592)

The overcommit of limits needs to be relative to the actual size of the 
cluster to appropriately scale those limits.
---
 .../src/main/scala/whisk/core/entitlement/Entitlement.scala| 7 +++
 1 file changed, 3 insertions(+), 4 deletions(-)

diff --git 
a/core/controller/src/main/scala/whisk/core/entitlement/Entitlement.scala 
b/core/controller/src/main/scala/whisk/core/entitlement/Entitlement.scala
index 478bda4..031c267 100644
--- a/core/controller/src/main/scala/whisk/core/entitlement/Entitlement.scala
+++ b/core/controller/src/main/scala/whisk/core/entitlement/Entitlement.scala
@@ -67,8 +67,7 @@ protected[core] object EntitlementProvider {
 WhiskConfig.actionInvokePerMinuteLimit -> null,
 WhiskConfig.actionInvokeConcurrentLimit -> null,
 WhiskConfig.triggerFirePerMinuteLimit -> null,
-WhiskConfig.actionInvokeSystemOverloadLimit -> null,
-WhiskConfig.controllerInstances -> null)
+WhiskConfig.actionInvokeSystemOverloadLimit -> null)
 }
 
 /**
@@ -86,8 +85,8 @@ protected[core] abstract class EntitlementProvider(
* Allows 20% of additional requests on top of the limit to mitigate 
possible unfair round-robin loadbalancing between
* controllers
*/
-  private val overcommit = if (config.controllerInstances.toInt > 1) 1.2 else 1
-  private def dilateLimit(limit: Int): Int = Math.ceil(limit.toDouble * 
overcommit).toInt
+  private def overcommit(clusterSize: Int) = if (clusterSize > 1) 1.2 else 1
+  private def dilateLimit(limit: Int): Int = Math.ceil(limit.toDouble * 
overcommit(loadBalancer.clusterSize)).toInt
 
   /**
* Calculates a possibly dilated limit relative to the current user.

-- 
To stop receiving notification emails like this one, please contact
cbic...@apache.org.


[incubator-openwhisk] branch master updated: Limit cipher suites used for Kafka SSL. (#3604)

2018-05-08 Thread cbickel
This is an automated email from the ASF dual-hosted git repository.

cbickel pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-openwhisk.git


The following commit(s) were added to refs/heads/master by this push:
 new 939c3d7  Limit cipher suites used for Kafka SSL. (#3604)
939c3d7 is described below

commit 939c3d797b54a870efbb085c14eb7fb4201d8fbb
Author: Vadim Raskin 
AuthorDate: Tue May 8 09:53:41 2018 +0200

Limit cipher suites used for Kafka SSL. (#3604)
---
 ansible/group_vars/all   | 7 +++
 ansible/roles/kafka/tasks/deploy.yml | 1 +
 2 files changed, 8 insertions(+)

diff --git a/ansible/group_vars/all b/ansible/group_vars/all
index 977d6ed..d4258b8 100644
--- a/ansible/group_vars/all
+++ b/ansible/group_vars/all
@@ -108,6 +108,13 @@ kafka:
 keystore:
   name: kafka-keystore.jks
   password: openwhisk
+cipher_suites:
+- TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA
+- TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256
+- TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256
+- TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA
+- TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256
+- TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256
   protocol: "{{ kafka_protocol_for_setup }}"
   version: 0.11.0.1
   port: 9072
diff --git a/ansible/roles/kafka/tasks/deploy.yml 
b/ansible/roles/kafka/tasks/deploy.yml
index ae4a7df..244c997 100644
--- a/ansible/roles/kafka/tasks/deploy.yml
+++ b/ansible/roles/kafka/tasks/deploy.yml
@@ -63,6 +63,7 @@
   "KAFKA_SSL_TRUSTSTORE_LOCATION": "/config/{{ kafka.ssl.keystore.name }}"
   "KAFKA_SSL_TRUSTSTORE_PASSWORD": "{{ kafka.ssl.keystore.password }}"
   "KAFKA_SSL_CLIENT_AUTH": "{{ kafka.ssl.client_authentication }}"
+  "KAFKA_SSL_CIPHER_SUITES": "{{ kafka.ssl.cipher_suites | join(',') }}"
 # The sed script passed in CUSTOM_INIT_SCRIPT fixes a bug in the 
wurstmeister dcoker image
 # by patching the server.configuration file right before kafka is started.
 # The script adds the missing advertized hostname to the 
advertised.listener property

-- 
To stop receiving notification emails like this one, please contact
cbic...@apache.org.


[incubator-openwhisk] branch master updated: Assure nginx verifies downstream ssl certs. (#3658)

2018-05-16 Thread cbickel
This is an automated email from the ASF dual-hosted git repository.

cbickel pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-openwhisk.git


The following commit(s) were added to refs/heads/master by this push:
 new 22c0807  Assure nginx verifies downstream ssl certs. (#3658)
22c0807 is described below

commit 22c0807b9eae20af9acc70b91a1943dd35a95092
Author: Vadim Raskin 
AuthorDate: Wed May 16 13:00:16 2018 +0200

Assure nginx verifies downstream ssl certs. (#3658)
---
 ansible/roles/nginx/templates/nginx.conf.j2 | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/ansible/roles/nginx/templates/nginx.conf.j2 
b/ansible/roles/nginx/templates/nginx.conf.j2
index 61ea6fb..a86593f 100644
--- a/ansible/roles/nginx/templates/nginx.conf.j2
+++ b/ansible/roles/nginx/templates/nginx.conf.j2
@@ -26,6 +26,8 @@ http {
 {% if controller.protocol == 'https' %}
 proxy_ssl_session_reuse on;
 proxy_ssl_name {{ controller.ssl.cn }};
+proxy_ssl_verify on;
+proxy_ssl_trusted_certificate /etc/nginx/{{ controller.ssl.cert }};
 proxy_ssl_protocols TLSv1.1 TLSv1.2;
 proxy_ssl_certificate /etc/nginx/{{ controller.ssl.cert }};
 proxy_ssl_certificate_key /etc/nginx/{{ controller.ssl.key }};
@@ -90,7 +92,6 @@ http {
 ssl_protocolsTLSv1.2;
 ssl_ciphers 
ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA384:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA256;
 ssl_prefer_server_ciphers on;
-proxy_ssl_verify off;
 proxy_ssl_session_reuse on;
 
 # proxy to the web action path

-- 
To stop receiving notification emails like this one, please contact
cbic...@apache.org.


[incubator-openwhisk] branch master updated: Fixes asymmetry between reading and writing of ByteSize. (#3668)

2018-05-18 Thread cbickel
This is an automated email from the ASF dual-hosted git repository.

cbickel pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-openwhisk.git


The following commit(s) were added to refs/heads/master by this push:
 new 7082e94  Fixes asymmetry between reading and writing of ByteSize. 
(#3668)
7082e94 is described below

commit 7082e947cedafe35bac90df7f54fbce98596d853
Author: rodric rabbah 
AuthorDate: Fri May 18 08:08:54 2018 -0400

Fixes asymmetry between reading and writing of ByteSize. (#3668)

Constructing a ByteSize from String will accept MB or M, KB or K (and is 
case insensitive).
This is to match the toString method which writes out the units as MB or KB 
(so that a serialized value can be parsed back to the identity).
---
 .../src/main/scala/whisk/core/entity/Size.scala| 25 ++---
 .../scala/whisk/core/entity/test/SizeTests.scala   | 32 +-
 2 files changed, 40 insertions(+), 17 deletions(-)

diff --git a/common/scala/src/main/scala/whisk/core/entity/Size.scala 
b/common/scala/src/main/scala/whisk/core/entity/Size.scala
index 8273418..61e27b4 100644
--- a/common/scala/src/main/scala/whisk/core/entity/Size.scala
+++ b/common/scala/src/main/scala/whisk/core/entity/Size.scala
@@ -79,18 +79,23 @@ case class ByteSize(size: Long, unit: SizeUnits.Unit) 
extends Ordered[ByteSize]
 }
 
 object ByteSize {
+  private val regex = """(?i)\s?(\d+)\s?(MB|KB|B|M|K)\s?""".r.pattern
+  protected[entity] val formatError = """Size Unit not supported. Only "B", 
"K[B]" and "M[B]" are supported."""
+
   def fromString(sizeString: String): ByteSize = {
-val unitprefix = sizeString.takeRight(1).toUpperCase
-val size = sizeString.dropRight(1).trim.toLong
-
-val unit = unitprefix match {
-  case "B" => SizeUnits.BYTE
-  case "K" => SizeUnits.KB
-  case "M" => SizeUnits.MB
-  case _   => throw new IllegalArgumentException("""Size Unit not 
supported. Only "B", "K" and "M" are supported.""")
-}
+val matcher = regex.matcher(sizeString)
+if (matcher.matches()) {
+  val size = matcher.group(1).toInt
+  val unit = matcher.group(2).charAt(0).toUpper match {
+case 'B' => SizeUnits.BYTE
+case 'K' => SizeUnits.KB
+case 'M' => SizeUnits.MB
+  }
 
-ByteSize(size, unit)
+  ByteSize(size, unit)
+} else {
+  throw new IllegalArgumentException(formatError)
+}
   }
 }
 
diff --git a/tests/src/test/scala/whisk/core/entity/test/SizeTests.scala 
b/tests/src/test/scala/whisk/core/entity/test/SizeTests.scala
index 16bca03..250cec0 100644
--- a/tests/src/test/scala/whisk/core/entity/test/SizeTests.scala
+++ b/tests/src/test/scala/whisk/core/entity/test/SizeTests.scala
@@ -119,24 +119,42 @@ class SizeTests extends FlatSpec with Matchers {
 
   // Create ObjectSize from String
   it should "create ObjectSize from String 3B" in {
-val fromString = ByteSize.fromString("3B")
-fromString equals (3 B)
+ByteSize.fromString("3b") equals (3 B)
+ByteSize.fromString("3B") equals (3 B)
+ByteSize.fromString("3 b") equals (3 B)
+ByteSize.fromString("3 B") equals (3 B)
   }
 
   it should "create ObjectSize from String 7K" in {
-val fromString = ByteSize.fromString("7K")
-fromString equals (7 KB)
+ByteSize.fromString("7k") equals (7 KB)
+ByteSize.fromString("7K") equals (7 KB)
+ByteSize.fromString("7KB") equals (7 KB)
+ByteSize.fromString("7kB") equals (7 KB)
+ByteSize.fromString("7kb") equals (7 KB)
+ByteSize.fromString("7 k") equals (7 KB)
+ByteSize.fromString("7 K") equals (7 KB)
+ByteSize.fromString("7 KB") equals (7 KB)
+ByteSize.fromString("7 kB") equals (7 KB)
+ByteSize.fromString("7 kb") equals (7 KB)
   }
 
   it should "create ObjectSize from String 120M" in {
-val fromString = ByteSize.fromString("120M")
-fromString equals (120 MB)
+ByteSize.fromString("120m") equals (120 MB)
+ByteSize.fromString("120M") equals (120 MB)
+ByteSize.fromString("120MB") equals (120 MB)
+ByteSize.fromString("120mB") equals (120 MB)
+ByteSize.fromString("120mb") equals (120 MB)
+ByteSize.fromString("120 m") equals (120 MB)
+ByteSize.fromString("120 M") equals (120 MB)
+ByteSize.fromString("120 MB") equals (120 MB)
+ByteSize.fromString("120 mB") equals (120 MB)
+ByteSize.fromString("120 mb") equals (120 MB)
   }
 
   it should "throw error on creating ObjectSize from String 120A" in {
 the[IllegalArgumentException] thrownBy {
   ByteSize.fromString("120A")
-} should have message """Size Unit not supported. Only "B", "K" and "M" 
are supported."""
+} should have message ByteSize.formatError
   }
 
   it should "throw error on creating ByteSize object with negative size" in {

-- 
To stop receiving notification emails like this one, please contact
cbic...@apache.org.


[incubator-openwhisk] branch revert-3702-asf-xml deleted (was 582d576)

2018-05-25 Thread cbickel
This is an automated email from the ASF dual-hosted git repository.

cbickel pushed a change to branch revert-3702-asf-xml
in repository https://gitbox.apache.org/repos/asf/incubator-openwhisk.git.


 was 582d576  Revert "Add ASF license to optional XML files and last gradle 
file."

The revisions that were on this branch are still contained in
other references; therefore, this change does not discard any commits
from the repository.

-- 
To stop receiving notification emails like this one, please contact
cbic...@apache.org.


[incubator-openwhisk] branch master updated: Add equals method to ensure that ByteSize instances satisfy Comparable contract. (#3697)

2018-06-05 Thread cbickel
This is an automated email from the ASF dual-hosted git repository.

cbickel pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-openwhisk.git


The following commit(s) were added to refs/heads/master by this push:
 new d89e1d0  Add equals method to ensure that ByteSize instances satisfy 
Comparable contract. (#3697)
d89e1d0 is described below

commit d89e1d01fd7b14c8c86e2c35ffbc0996e29d70d5
Author: rodric rabbah 
AuthorDate: Tue Jun 5 05:03:38 2018 -0400

Add equals method to ensure that ByteSize instances satisfy Comparable 
contract. (#3697)

Also adjust tests which were not checking conditions correctly for tests to 
fail when invariants are violated.
---
 .../src/main/scala/whisk/core/entity/Size.scala|  7 +-
 .../scala/whisk/core/entity/test/SizeTests.scala   | 78 --
 2 files changed, 49 insertions(+), 36 deletions(-)

diff --git a/common/scala/src/main/scala/whisk/core/entity/Size.scala 
b/common/scala/src/main/scala/whisk/core/entity/Size.scala
index 2d5b7d9..a51eb2e 100644
--- a/common/scala/src/main/scala/whisk/core/entity/Size.scala
+++ b/common/scala/src/main/scala/whisk/core/entity/Size.scala
@@ -22,7 +22,7 @@ import java.nio.charset.StandardCharsets
 import com.typesafe.config.ConfigValue
 import pureconfig._
 import spray.json._
-import whisk.core.entity.ByteSize.formatError
+import ByteSize.formatError
 
 object SizeUnits extends Enumeration {
 
@@ -71,6 +71,11 @@ case class ByteSize(size: Long, unit: SizeUnits.Unit) 
extends Ordered[ByteSize]
 
   def compare(other: ByteSize) = toBytes compare other.toBytes
 
+  override def equals(that: Any): Boolean = that match {
+case t: ByteSize => compareTo(t) == 0
+case _   => false
+  }
+
   override def toString = {
 unit match {
   case SizeUnits.BYTE => s"$size B"
diff --git a/tests/src/test/scala/whisk/core/entity/test/SizeTests.scala 
b/tests/src/test/scala/whisk/core/entity/test/SizeTests.scala
index 250cec0..c74f146 100644
--- a/tests/src/test/scala/whisk/core/entity/test/SizeTests.scala
+++ b/tests/src/test/scala/whisk/core/entity/test/SizeTests.scala
@@ -23,6 +23,7 @@ import org.junit.runner.RunWith
 import org.scalatest.FlatSpec
 import org.scalatest.Matchers
 import org.scalatest.junit.JUnitRunner
+import spray.json._
 import whisk.core.entity.size.SizeInt
 import whisk.core.entity.ByteSize
 
@@ -37,8 +38,8 @@ class SizeTests extends FlatSpec with Matchers {
 val oneKB = 1 KB
 val oneMB = 1 MB
 
-oneByte < oneKB should be(true)
-oneKB < oneMB should be(true)
+oneByte should be < oneKB
+oneKB should be < oneMB
   }
 
   it should "3 Bytes smaller than 2 KB smaller than 1 MB" in {
@@ -46,8 +47,8 @@ class SizeTests extends FlatSpec with Matchers {
 val myKBs = 2 KB
 val myMBs = 1 MB
 
-myBytes < myKBs should be(true)
-myKBs < myMBs should be(true)
+myBytes should be < myKBs
+myKBs should be < myMBs
   }
 
   it should "1 MB greater than 1 KB greater than 1 Byte" in {
@@ -55,17 +56,17 @@ class SizeTests extends FlatSpec with Matchers {
 val oneKB = 1 KB
 val oneMB = 1 MB
 
-oneMB > oneKB should be(true)
-oneKB > oneByte should be(true)
+oneMB should be > oneKB
+oneKB should be > oneByte
   }
 
   it should "1 MB == 1024 KB == 1048576 B" in {
-val myBytes = 1048576 B
+val myBytes = (1 << 20) B
 val myKBs = 1024 KB
 val myMBs = 1 MB
 
-myBytes equals (myKBs)
-myKBs equals (myMBs)
+myBytes should equal(myKBs)
+myKBs should equal(myMBs)
   }
 
   // Addition
@@ -98,7 +99,7 @@ class SizeTests extends FlatSpec with Matchers {
   }
 
   it should "1048576 B to MB = 1" in {
-(1048576 B).toMB should be(1)
+((1 << 20) B).toMB should be(1)
   }
 
   it should "1 KB to B = 1024" in {
@@ -110,7 +111,7 @@ class SizeTests extends FlatSpec with Matchers {
   }
 
   it should "1 MB to B = 1048576" in {
-(1 MB).toBytes should be(1048576)
+(1 MB).toBytes should be(1 << 20)
   }
 
   it should "1 MB to KB = 1024" in {
@@ -119,36 +120,43 @@ class SizeTests extends FlatSpec with Matchers {
 
   // Create ObjectSize from String
   it should "create ObjectSize from String 3B" in {
-ByteSize.fromString("3b") equals (3 B)
-ByteSize.fromString("3B") equals (3 B)
-ByteSize.fromString("3 b") equals (3 B)
-ByteSize.fromString("3 B") equals (3 B)
+ByteSize.fromString("3b") should be(3 B)
+ByteSize.fromString("3B") should be(3 B)
+ByteSize.fromString("3 b") should be(3 B)
+ByteSize.fromString("3 B") should be(3 B)
   }
 
   it should "create ObjectSize from String 7K" in {
-ByteSize.fromString("7k") equals (7 KB)
-ByteSize.fromString("7K") equals (7 K

[incubator-openwhisk] branch master updated: Refactor `ensureTopic` to expose failure details. (#3686)

2018-06-12 Thread cbickel
This is an automated email from the ASF dual-hosted git repository.

cbickel pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-openwhisk.git


The following commit(s) were added to refs/heads/master by this push:
 new 74216e1  Refactor `ensureTopic` to expose failure details. (#3686)
74216e1 is described below

commit 74216e131c58e9dac4bbb2f1fcd2a44495b9988d
Author: Markus Thömmes 
AuthorDate: Tue Jun 12 13:37:08 2018 +0200

Refactor `ensureTopic` to expose failure details. (#3686)

ensureTopic returns a `Boolean` value of whether it successfully created a 
topic or not.

This changes that behavior to actually return the Exception in case of an 
error. That enables the client-side code to handle (or log) that failure 
appropriately while maintaining the ease of checking a successful result by 
using `isSuccess`.
---
 .../CausedBy.scala}| 33 +++
 .../connector/kafka/KafkaMessagingProvider.scala   | 47 ++
 .../whisk/core/connector/MessagingProvider.scala   |  4 +-
 .../scala/whisk/core/controller/Controller.scala   | 20 +
 .../main/scala/whisk/core/invoker/Invoker.scala|  2 +-
 .../test/scala/services/KafkaConnectorTests.scala  |  8 ++--
 6 files changed, 53 insertions(+), 61 deletions(-)

diff --git 
a/common/scala/src/main/scala/whisk/core/connector/MessagingProvider.scala 
b/common/scala/src/main/scala/whisk/common/CausedBy.scala
similarity index 51%
copy from 
common/scala/src/main/scala/whisk/core/connector/MessagingProvider.scala
copy to common/scala/src/main/scala/whisk/common/CausedBy.scala
index 8ec1f5a..caa2ba4 100644
--- a/common/scala/src/main/scala/whisk/core/connector/MessagingProvider.scala
+++ b/common/scala/src/main/scala/whisk/common/CausedBy.scala
@@ -15,26 +15,21 @@
  * limitations under the License.
  */
 
-package whisk.core.connector
-
-import akka.actor.ActorSystem
-
-import scala.concurrent.duration.DurationInt
-import scala.concurrent.duration.FiniteDuration
-import whisk.common.Logging
-import whisk.core.WhiskConfig
-import whisk.spi.Spi
+package whisk.common
 
 /**
- * An Spi for providing Messaging implementations.
+ * Helper to match on exceptions caused by other exceptions.
+ *
+ * Use this like:
+ *
+ * ```
+ * try {
+ *   block()
+ * } catch {
+ *   case CausedBy(internalException: MyFancyException) => ...
+ * }
+ * ```
  */
-trait MessagingProvider extends Spi {
-  def getConsumer(
-config: WhiskConfig,
-groupId: String,
-topic: String,
-maxPeek: Int = Int.MaxValue,
-maxPollInterval: FiniteDuration = 5.minutes)(implicit logging: Logging, 
actorSystem: ActorSystem): MessageConsumer
-  def getProducer(config: WhiskConfig)(implicit logging: Logging, actorSystem: 
ActorSystem): MessageProducer
-  def ensureTopic(config: WhiskConfig, topic: String, topicConfig: 
String)(implicit logging: Logging): Boolean
+object CausedBy {
+  def unapply(e: Throwable): Option[Throwable] = Option(e.getCause)
 }
diff --git 
a/common/scala/src/main/scala/whisk/connector/kafka/KafkaMessagingProvider.scala
 
b/common/scala/src/main/scala/whisk/connector/kafka/KafkaMessagingProvider.scala
index e939a46..7351b64 100644
--- 
a/common/scala/src/main/scala/whisk/connector/kafka/KafkaMessagingProvider.scala
+++ 
b/common/scala/src/main/scala/whisk/connector/kafka/KafkaMessagingProvider.scala
@@ -18,18 +18,18 @@
 package whisk.connector.kafka
 
 import java.util.Properties
-import java.util.concurrent.ExecutionException
 
 import akka.actor.ActorSystem
 import org.apache.kafka.clients.admin.{AdminClient, AdminClientConfig, 
NewTopic}
 import org.apache.kafka.common.errors.TopicExistsException
 import pureconfig._
-import whisk.common.Logging
+import whisk.common.{CausedBy, Logging}
 import whisk.core.{ConfigKeys, WhiskConfig}
 import whisk.core.connector.{MessageConsumer, MessageProducer, 
MessagingProvider}
 
 import scala.collection.JavaConverters._
 import scala.concurrent.duration.FiniteDuration
+import scala.util.{Failure, Success, Try}
 
 case class KafkaConfig(replicationFactor: Short)
 
@@ -47,31 +47,28 @@ object KafkaMessagingProvider extends MessagingProvider {
   def getProducer(config: WhiskConfig)(implicit logging: Logging, actorSystem: 
ActorSystem): MessageProducer =
 new KafkaProducerConnector(config.kafkaHosts)
 
-  def ensureTopic(config: WhiskConfig, topic: String, topicConfig: 
String)(implicit logging: Logging): Boolean = {
-val kc = loadConfigOrThrow[KafkaConfig](ConfigKeys.kafka)
-val tc = KafkaConfiguration.configMapToKafkaConfig(
-  loadConfigOrThrow[Map[String, String]](ConfigKeys.kafkaTopics + 
s".$topicConfig"))
+  def ensureTopic(config: WhiskConfig, topic: String, topicConfigKey: 
String)(implicit logging: Logging): Try[Unit] = {
+val kafkaConfig = loadConfigOrThrow[KafkaConfig](ConfigKeys.kafka)
+val topicConfig = KafkaConfiguration.configMapToKafkaConfig(
+  loadConfigOrThr

[incubator-openwhisk] branch master updated: Retry `ensureTopic` on transient, retriable exceptions. (#3753)

2018-06-15 Thread cbickel
This is an automated email from the ASF dual-hosted git repository.

cbickel pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-openwhisk.git


The following commit(s) were added to refs/heads/master by this push:
 new 3c816aa  Retry `ensureTopic` on transient, retriable exceptions. 
(#3753)
3c816aa is described below

commit 3c816aa6e326e643da5ebb8d5ad504578a597b9b
Author: Markus Thömmes 
AuthorDate: Fri Jun 15 17:08:35 2018 +0200

Retry `ensureTopic` on transient, retriable exceptions. (#3753)

Like writing and reading from Kafka, creating topics can be subject to a 
battery of transient errors. Retrying those errors is safe and keeps us sane.
---
 .../connector/kafka/KafkaMessagingProvider.scala   | 30 ++
 1 file changed, 19 insertions(+), 11 deletions(-)

diff --git 
a/common/scala/src/main/scala/whisk/connector/kafka/KafkaMessagingProvider.scala
 
b/common/scala/src/main/scala/whisk/connector/kafka/KafkaMessagingProvider.scala
index 7351b64..6843cbc 100644
--- 
a/common/scala/src/main/scala/whisk/connector/kafka/KafkaMessagingProvider.scala
+++ 
b/common/scala/src/main/scala/whisk/connector/kafka/KafkaMessagingProvider.scala
@@ -21,14 +21,14 @@ import java.util.Properties
 
 import akka.actor.ActorSystem
 import org.apache.kafka.clients.admin.{AdminClient, AdminClientConfig, 
NewTopic}
-import org.apache.kafka.common.errors.TopicExistsException
+import org.apache.kafka.common.errors.{RetriableException, 
TopicExistsException}
 import pureconfig._
 import whisk.common.{CausedBy, Logging}
 import whisk.core.{ConfigKeys, WhiskConfig}
 import whisk.core.connector.{MessageConsumer, MessageProducer, 
MessagingProvider}
 
 import scala.collection.JavaConverters._
-import scala.concurrent.duration.FiniteDuration
+import scala.concurrent.duration._
 import scala.util.{Failure, Success, Try}
 
 case class KafkaConfig(replicationFactor: Short)
@@ -57,15 +57,23 @@ object KafkaMessagingProvider extends MessagingProvider {
 val partitions = 1
 val nt = new NewTopic(topic, partitions, 
kafkaConfig.replicationFactor).configs(topicConfig.asJava)
 
-val result = 
Try(client.createTopics(List(nt).asJava).values().get(topic).get())
-  .map(_ => logging.info(this, s"created topic $topic"))
-  .recoverWith {
-case CausedBy(_: TopicExistsException) =>
-  Success(logging.info(this, s"topic $topic already existed"))
-case t =>
-  logging.error(this, s"ensureTopic for $topic failed due to $t")
-  Failure(t)
-  }
+def createTopic(retries: Int = 5): Try[Unit] = {
+  Try(client.createTopics(List(nt).asJava).values().get(topic).get())
+.map(_ => logging.info(this, s"created topic $topic"))
+.recoverWith {
+  case CausedBy(_: TopicExistsException) =>
+Success(logging.info(this, s"topic $topic already existed"))
+  case CausedBy(t: RetriableException) if retries > 0 =>
+logging.warn(this, s"topic $topic could not be created because of 
$t, retries left: $retries")
+Thread.sleep(1.second.toMillis)
+createTopic(retries - 1)
+  case t =>
+logging.error(this, s"ensureTopic for $topic failed due to $t")
+Failure(t)
+}
+}
+
+val result = createTopic()
 
 client.close()
 result

-- 
To stop receiving notification emails like this one, please contact
cbic...@apache.org.


[incubator-openwhisk] branch master updated: Only send kafka offset metrics, if offsets are meaningful. (#3780)

2018-06-19 Thread cbickel
This is an automated email from the ASF dual-hosted git repository.

cbickel pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-openwhisk.git


The following commit(s) were added to refs/heads/master by this push:
 new 59ba42b  Only send kafka offset metrics, if offsets are meaningful. 
(#3780)
59ba42b is described below

commit 59ba42b577f9c8b7c7911da26ee9bae9d5e4d1a2
Author: Markus Thömmes 
AuthorDate: Tue Jun 19 11:56:39 2018 +0200

Only send kafka offset metrics, if offsets are meaningful. (#3780)

The internal `offset` held by the KafkaConsumerConnector starts at 0 but is 
only meaningful after the first message has been read from Kafka. In case of an 
Invoker restart for example, the reported offset difference might be absurdly 
high, because the offset in Zookeeper is high, while the internal offset is 
still 0.
---
 .../scala/whisk/connector/kafka/KafkaConsumerConnector.scala | 12 +++-
 1 file changed, 7 insertions(+), 5 deletions(-)

diff --git 
a/common/scala/src/main/scala/whisk/connector/kafka/KafkaConsumerConnector.scala
 
b/common/scala/src/main/scala/whisk/connector/kafka/KafkaConsumerConnector.scala
index e551f5b..eeec4a4 100644
--- 
a/common/scala/src/main/scala/whisk/connector/kafka/KafkaConsumerConnector.scala
+++ 
b/common/scala/src/main/scala/whisk/connector/kafka/KafkaConsumerConnector.scala
@@ -146,11 +146,13 @@ class KafkaConsumerConnector(
   Scheduler.scheduleWaitAtMost(cfg.metricFlushIntervalS.seconds, 10.seconds, 
"kafka-lag-monitor") { () =>
 Future {
   blocking {
-val topicAndPartition = new TopicPartition(topic, 0)
-
consumer.endOffsets(Set(topicAndPartition).asJava).asScala.get(topicAndPartition).foreach
 { endOffset =>
-  // endOffset could lag behind the offset reported by the consumer 
internally resulting in negative numbers
-  val queueSize = (endOffset - offset).max(0)
-  MetricEmitter.emitHistogramMetric(queueMetric, queueSize)
+if (offset > 0) {
+  val topicAndPartition = new TopicPartition(topic, 0)
+  
consumer.endOffsets(Set(topicAndPartition).asJava).asScala.get(topicAndPartition).foreach
 { endOffset =>
+// endOffset could lag behind the offset reported by the consumer 
internally resulting in negative numbers
+val queueSize = (endOffset - offset).max(0)
+MetricEmitter.emitHistogramMetric(queueMetric, queueSize)
+  }
 }
   }
 }



[incubator-openwhisk] branch master updated: Add documentation to the loadbalancer. (#3778)

2018-07-03 Thread cbickel
This is an automated email from the ASF dual-hosted git repository.

cbickel pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-openwhisk.git


The following commit(s) were added to refs/heads/master by this push:
 new 06818a4  Add documentation to the loadbalancer. (#3778)
06818a4 is described below

commit 06818a4a8056aea4c0c555033f2a947ff15e33fa
Author: Markus Thömmes 
AuthorDate: Tue Jul 3 09:00:04 2018 +0200

Add documentation to the loadbalancer. (#3778)

* Add documentation to the loadbalancer.

* Add information on the overflow and other edge cases.

* Incorporating more feedback to make prose description clearer.

* Clarify capacity determination.

* Clarify health protocol.
---
 .../ShardingContainerPoolBalancer.scala| 90 +-
 1 file changed, 88 insertions(+), 2 deletions(-)

diff --git 
a/core/controller/src/main/scala/whisk/core/loadBalancer/ShardingContainerPoolBalancer.scala
 
b/core/controller/src/main/scala/whisk/core/loadBalancer/ShardingContainerPoolBalancer.scala
index eac4aff..72124ec 100644
--- 
a/core/controller/src/main/scala/whisk/core/loadBalancer/ShardingContainerPoolBalancer.scala
+++ 
b/core/controller/src/main/scala/whisk/core/loadBalancer/ShardingContainerPoolBalancer.scala
@@ -45,10 +45,96 @@ import scala.concurrent.{ExecutionContext, Future, Promise}
 import scala.util.{Failure, Success}
 
 /**
- * A loadbalancer that uses "horizontal" sharding to not collide with fellow 
loadbalancers.
+ * A loadbalancer that schedules workload based on a hashing-algorithm.
+ *
+ * ## Algorithm
+ *
+ * At first, for every namespace + action pair a hash is calculated and then 
an invoker is picked based on that hash
+ * (`hash % numInvokers`). The determined index is the so called 
"home-invoker". This is the invoker where the following
+ * progression will **always** start. If this invoker is healthy (see "Invoker 
health checking") and if there is
+ * capacity on that invoker (see "Capacity checking"), the request is 
scheduled to it.
+ *
+ * If one of these prerequisites is not true, the index is incremented by a 
step-size. The step-sizes available are the
+ * all coprime numbers smaller than the amount of invokers available (coprime, 
to minimize collisions while progressing
+ * through the invokers). The step-size is picked by the same hash calculated 
above (`hash & numStepSizes`). The
+ * home-invoker-index is now incremented by the step-size and the checks 
(healthy + capacity) are done on the invoker
+ * we land on now.
+ *
+ * This procedure is repeated until all invokers have been checked at which 
point the "overload" strategy will be
+ * employed, which is to choose a healthy invoker randomly. In a steadily 
running system, that overload means that there
+ * is no capacity on any invoker left to schedule the current request to.
+ *
+ * If no invokers are available or if there are no healthy invokers in the 
system, the loadbalancer will return an error
+ * stating that no invokers are available to take any work. Requests are not 
queued anywhere in this case.
+ *
+ * An example:
+ * - availableInvokers: 10 (all healthy)
+ * - hash: 13
+ * - homeInvoker: hash % availableInvokers = 13 % 10 = 3
+ * - stepSizes: 1, 3, 7 (note how 2 and 5 are not part of this because they're not 
coprime to 10)
+ * - stepSizeIndex: hash % numStepSizes = 13 % 3 = 1 => stepSize = 3
+ *
+ * Progression to check the invokers: 3, 6, 9, 2, 5, 8, 1, 4, 7, 0 --> done
+ *
+ * This heuristic is based on the assumption, that the chance to get a warm 
container is the best on the home invoker
+ * and degrades the more steps you make. The hashing makes sure that all 
loadbalancers in a cluster will always pick the
+ * same home invoker and do the same progression for a given action.
+ *
+ * Known caveats:
+ * - This assumption is not always true. For instance, two heavy workloads 
landing on the same invoker can override each
+ *   other, which results in many cold starts due to all containers being 
evicted by the invoker to make space for the
+ *   "other" workload respectively. Future work could be to keep a buffer of 
invokers last scheduled for each action and
+ *   to prefer to pick that one. Then the second-last one and so forth.
+ *
+ * ## Capacity checking
+ *
+ * The maximum capacity per invoker is configured using 
`invoker-busy-threshold`, which is the maximum amount of actions
+ * running in parallel on that invoker.
+ *
+ * Spare capacity is determined by what the loadbalancer thinks it scheduled 
to each invoker. Upon scheduling, an entry
+ * is made to update the books and a slot in a Semaphore is taken. That slot 
is only released after the response from
+ * the invoker (active-ack) arrives **or** after the active-ack times out. The 
Semaphore has as many slots as are
+ * configured via `invoker-busy-th

[incubator-openwhisk] branch master updated: Use a PoolingConnectionManager even for single connection use-cases. (#3836)

2018-07-03 Thread cbickel
This is an automated email from the ASF dual-hosted git repository.

cbickel pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-openwhisk.git


The following commit(s) were added to refs/heads/master by this push:
 new b288435  Use a PoolingConnectionManager even for single connection 
use-cases. (#3836)
b288435 is described below

commit b288435dfdb3c0f6bc5622d6cbb04aaa233f28b4
Author: Markus Thömmes 
AuthorDate: Tue Jul 3 18:00:20 2018 +0200

Use a PoolingConnectionManager even for single connection use-cases. (#3836)

The PoolingConnectionManager checks connections for their staleness, which 
is important because we're pausing/resuming containers all the time. 
Connections can go stale in this process.
---
 .../scala/whisk/core/containerpool/HttpUtils.scala | 26 --
 1 file changed, 19 insertions(+), 7 deletions(-)

diff --git 
a/common/scala/src/main/scala/whisk/core/containerpool/HttpUtils.scala 
b/common/scala/src/main/scala/whisk/core/containerpool/HttpUtils.scala
index 4ff205d..c5faab4 100644
--- a/common/scala/src/main/scala/whisk/core/containerpool/HttpUtils.scala
+++ b/common/scala/src/main/scala/whisk/core/containerpool/HttpUtils.scala
@@ -38,7 +38,7 @@ import org.apache.http.client.utils.{HttpClientUtils, 
URIBuilder}
 import org.apache.http.conn.HttpHostConnectException
 import org.apache.http.entity.StringEntity
 import org.apache.http.impl.client.HttpClientBuilder
-import org.apache.http.impl.conn.{BasicHttpClientConnectionManager, 
PoolingHttpClientConnectionManager}
+import org.apache.http.impl.conn.PoolingHttpClientConnectionManager
 import spray.json._
 import whisk.common.Logging
 import whisk.common.TransactionId
@@ -62,6 +62,12 @@ import whisk.core.entity.size.SizeLong
 protected class HttpUtils(hostname: String, timeout: FiniteDuration, 
maxResponse: ByteSize, maxConcurrent: Int = 1)(
   implicit logging: Logging) {
 
+  /**
+   * Closes the HttpClient and all resources allocated by it.
+   *
+   * This will close the HttpClient that is generated for this instance of 
HttpUtils. That will also cause the
+   * ConnectionManager to be closed alongside.
+   */
   def close() = Try(connection.close())
 
   /**
@@ -159,15 +165,21 @@ protected class HttpUtils(hostname: String, timeout: 
FiniteDuration, maxResponse
 
   private val connection = HttpClientBuilder.create
 .setDefaultRequestConfig(httpconfig)
-.setConnectionManager(if (maxConcurrent > 1) {
-  // Use PoolingHttpClientConnectionManager so that concurrent activation 
processing (if enabled) will reuse connections
-  val cm = new PoolingHttpClientConnectionManager
-  // Increase default max connections per route (default is 2)
+.setConnectionManager({
+  // A PoolingHttpClientConnectionManager is the default when not 
specifying any ConnectionManager.
+  // The PoolingHttpClientConnectionManager has the benefit of actively 
checking if a connection has become stale,
+  // which is very important because pausing/resuming containers can cause 
a connection to become silently broken.
+  // This causes very subtle bugs, especially when containers are reused 
after a pretty long time (like > 5 minutes).
+  //
+  // The BasicHttpClientConnectionManager (which would be alternative 
here) doesn't have such a mechanism and thus
+  // isn't suitable for our usage.
+  val cm = new PoolingHttpClientConnectionManager()
+  // perRoute effectively means per host in our use-case, which means 
setting it to the same value as the maximum
+  // total of all connections in the pool is appropriate here.
   cm.setDefaultMaxPerRoute(maxConcurrent)
-  // Increase max total connections (default is 20)
   cm.setMaxTotal(maxConcurrent)
   cm
-} else new BasicHttpClientConnectionManager()) // set the Pooling 
connection manager IFF maxConcurrent > 1
+})
 .useSystemProperties()
 .disableAutomaticRetries()
 .build



[incubator-openwhisk] branch master updated: Revert hardening of HttpUtils. (#3842)

2018-07-04 Thread cbickel
This is an automated email from the ASF dual-hosted git repository.

cbickel pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-openwhisk.git


The following commit(s) were added to refs/heads/master by this push:
 new fc5798b  Revert hardening of HttpUtils. (#3842)
fc5798b is described below

commit fc5798bb299e1f993449472cecbb55ffce1ebe62
Author: Markus Thömmes 
AuthorDate: Wed Jul 4 16:43:10 2018 +0200

Revert hardening of HttpUtils. (#3842)

The changes introduced here are subject so latency and stability problems 
in larger environments. Especially the impact of consuming the whole entity 
needs to be carefully asserted and verified before getting it back in.
---
 .../src/main/scala/whisk/core/containerpool/HttpUtils.scala  | 9 -
 1 file changed, 4 insertions(+), 5 deletions(-)

diff --git 
a/common/scala/src/main/scala/whisk/core/containerpool/HttpUtils.scala 
b/common/scala/src/main/scala/whisk/core/containerpool/HttpUtils.scala
index c5faab4..bf95441 100644
--- a/common/scala/src/main/scala/whisk/core/containerpool/HttpUtils.scala
+++ b/common/scala/src/main/scala/whisk/core/containerpool/HttpUtils.scala
@@ -34,7 +34,7 @@ import org.apache.http.HttpHeaders
 import org.apache.http.client.config.RequestConfig
 import org.apache.http.client.methods.HttpPost
 import org.apache.http.client.methods.HttpRequestBase
-import org.apache.http.client.utils.{HttpClientUtils, URIBuilder}
+import org.apache.http.client.utils.URIBuilder
 import org.apache.http.conn.HttpHostConnectException
 import org.apache.http.entity.StringEntity
 import org.apache.http.impl.client.HttpClientBuilder
@@ -121,8 +121,7 @@ protected class HttpUtils(hostname: String, timeout: 
FiniteDuration, maxResponse
   Left(NoResponseReceived())
 }
 
-  // Fully consumes the entity and closes the response
-  HttpClientUtils.closeQuietly(response)
+  response.close()
   containerResponse
 } recoverWith {
   // The route to target socket as well as the target socket itself may 
need some time to be available -
@@ -165,7 +164,7 @@ protected class HttpUtils(hostname: String, timeout: 
FiniteDuration, maxResponse
 
   private val connection = HttpClientBuilder.create
 .setDefaultRequestConfig(httpconfig)
-.setConnectionManager({
+.setConnectionManager(if (maxConcurrent > 1) {
   // A PoolingHttpClientConnectionManager is the default when not 
specifying any ConnectionManager.
   // The PoolingHttpClientConnectionManager has the benefit of actively 
checking if a connection has become stale,
   // which is very important because pausing/resuming containers can cause 
a connection to become silently broken.
@@ -179,7 +178,7 @@ protected class HttpUtils(hostname: String, timeout: 
FiniteDuration, maxResponse
   cm.setDefaultMaxPerRoute(maxConcurrent)
   cm.setMaxTotal(maxConcurrent)
   cm
-})
+} else null)
 .useSystemProperties()
 .disableAutomaticRetries()
 .build



[incubator-openwhisk] branch master updated: Refactor invoker roles (#3785)

2018-07-06 Thread cbickel
This is an automated email from the ASF dual-hosted git repository.

cbickel pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-openwhisk.git


The following commit(s) were added to refs/heads/master by this push:
 new 818cebf  Refactor invoker roles (#3785)
818cebf is described below

commit 818cebf941e66beef553e79831740f8632e78c68
Author: James Dubee 
AuthorDate: Fri Jul 6 10:01:33 2018 -0400

Refactor invoker roles (#3785)

* Refactor invoker roles

* Make invoker environment variable name generic

* Use base index for invoker name
---
 ansible/invoker.yml| 22 ++-
 ansible/roles/invoker/tasks/clean.yml  | 19 -
 ansible/roles/invoker/tasks/deploy.yml | 51 --
 3 files changed, 63 insertions(+), 29 deletions(-)

diff --git a/ansible/invoker.yml b/ansible/invoker.yml
index 479764d..9ab49e2 100644
--- a/ansible/invoker.yml
+++ b/ansible/invoker.yml
@@ -4,5 +4,25 @@
 # This playbook deploys Openwhisk Invokers.
 
 - hosts: invokers
+  vars:
+#
+# host_group - usually "{{ groups['...'] }}" where '...' is what was used
+#   for 'hosts' above.  The hostname of each host will be looked up in this
+#   group to assign a zero-based index.  That index will be used in concert
+#   with 'name_prefix' below to assign a host/container name.
+host_group: "{{ groups['invokers'] }}"
+#
+# name_prefix - a unique prefix for this set of invokers.  The prefix
+#   will be used in combination with an index (determined using
+#   'host_group' above) to name host/invokers.
+name_prefix: "invoker"
+#
+# invoker_index_base - the deployment process allocates host docker
+#   ports to individual invokers based on their indices.  This is an
+#   additional offset to prevent collisions between different invoker
+#   groups. Usually 0 if only one group is being deployed, otherwise
+#   something like "{{ groups['firstinvokergroup']|length }}"
+invoker_index_base: 0
+
   roles:
-  - invoker
+- invoker
diff --git a/ansible/roles/invoker/tasks/clean.yml 
b/ansible/roles/invoker/tasks/clean.yml
index d9b2db3..9df25e7 100644
--- a/ansible/roles/invoker/tasks/clean.yml
+++ b/ansible/roles/invoker/tasks/clean.yml
@@ -3,9 +3,14 @@
 ---
 # Remove invoker containers.
 
+- name: get invoker name and index
+  set_fact:
+invoker_name: "{{ name_prefix ~ ((invoker_index_base | int) + 
host_group.index(inventory_hostname)) }}"
+invoker_index: "{{ (invoker_index_base | int) + 
host_group.index(inventory_hostname) }}"
+
 - name: remove invoker
   docker_container:
-name: "invoker{{ groups['invokers'].index(inventory_hostname) }}"
+name: "{{ invoker_name }}"
 image: "{{ docker_registry }}{{ docker.image.prefix }}/invoker:{{ 
docker.image.tag }}"
 state: absent
 stop_timeout: 60
@@ -15,9 +20,9 @@
 # In case the invoker could not clean up completely in time.
 - name: pause/resume at runc-level to restore docker consistency
   shell: |
-DOCKER_PAUSED=$(docker ps --filter status=paused --filter name=wsk{{ 
groups['invokers'].index(inventory_hostname) }} -q --no-trunc)
+DOCKER_PAUSED=$(docker ps --filter status=paused --filter name=wsk{{ 
invoker_index }} -q --no-trunc)
 for C in $DOCKER_PAUSED; do docker-runc pause $C; done
-DOCKER_RUNNING=$(docker ps --filter status=running --filter name=wsk{{ 
groups['invokers'].index(inventory_hostname) }} -q --no-trunc)
+DOCKER_RUNNING=$(docker ps --filter status=running --filter name=wsk{{ 
invoker_index }} -q --no-trunc)
 for C2 in $DOCKER_RUNNING; do docker-runc resume $C2; done
 TOTAL=$(($(echo $DOCKER_PAUSED | wc -w)+$(echo $DOCKER_RUNNING | wc 
-w)))
 echo "Handled $TOTAL remaining actions."
@@ -28,22 +33,22 @@
 - debug: msg="{{ runc_output.stdout }}"
 
 - name: unpause remaining actions
-  shell: "docker unpause $(docker ps -aq --filter status=paused --filter 
name=wsk{{ groups['invokers'].index(inventory_hostname) }})"
+  shell: "docker unpause $(docker ps -aq --filter status=paused --filter 
name=wsk{{ invoker_index }})"
   failed_when: False
 
 - name: remove remaining actions
-  shell: "docker rm -f $(docker ps -aq --filter name=wsk{{ 
groups['invokers'].index(inventory_hostname) }})"
+  shell: "docker rm -f $(docker ps -aq --filter name=wsk{{ invoker_index }})"
   failed_when: False
 
 - name: remove invoker log directory
   file:
-path: "{{ whisk_logs_dir }}/invoker{{ 
groups['invokers'].index(inventory_hostname) }}"
+path: "{{ whisk_logs_dir }}/{{ invoker_name }}"
 state: absent
   b

[incubator-openwhisk] branch master updated: Limit TLS protocols used for kafka and controller. (#3881)

2018-07-17 Thread cbickel
This is an automated email from the ASF dual-hosted git repository.

cbickel pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-openwhisk.git


The following commit(s) were added to refs/heads/master by this push:
 new 9afaf7b  Limit TLS protocols used for kafka and controller. (#3881)
9afaf7b is described below

commit 9afaf7be1a9c5cf69ad567e416d46cce351ee01a
Author: Vadim Raskin 
AuthorDate: Wed Jul 18 08:49:41 2018 +0200

Limit TLS protocols used for kafka and controller. (#3881)

* Limit TLS protocols used for kafka and controller

* Move protocols property into group_vars
---
 ansible/group_vars/all  |  2 ++
 ansible/roles/kafka/tasks/deploy.yml|  1 +
 core/controller/src/main/resources/application.conf | 18 ++
 3 files changed, 13 insertions(+), 8 deletions(-)

diff --git a/ansible/group_vars/all b/ansible/group_vars/all
index 729a683..19a7c9f 100644
--- a/ansible/group_vars/all
+++ b/ansible/group_vars/all
@@ -131,6 +131,8 @@ kafka:
 - TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA
 - TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256
 - TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256
+protocols:
+- TLSv1.2
   protocol: "{{ kafka_protocol_for_setup }}"
   version: 0.11.0.1
   port: 9072
diff --git a/ansible/roles/kafka/tasks/deploy.yml 
b/ansible/roles/kafka/tasks/deploy.yml
index 8f195fe..436fcc9 100644
--- a/ansible/roles/kafka/tasks/deploy.yml
+++ b/ansible/roles/kafka/tasks/deploy.yml
@@ -51,6 +51,7 @@
   "KAFKA_SSL_TRUSTSTORE_PASSWORD": "{{ kafka.ssl.keystore.password }}"
   "KAFKA_SSL_CLIENT_AUTH": "{{ kafka.ssl.client_authentication }}"
   "KAFKA_SSL_CIPHER_SUITES": "{{ kafka.ssl.cipher_suites | join(',') }}"
+  "KAFKA_SSL_ENABLED_PROTOCOLS": "{{ kafka.ssl.protocols | join(',') }}"
 # The sed script passed in CUSTOM_INIT_SCRIPT fixes a bug in the 
wurstmeister docker image
 # by patching the server.configuration file right before kafka is started.
# The script adds the missing advertised hostname to the 
advertised.listener property
diff --git a/core/controller/src/main/resources/application.conf 
b/core/controller/src/main/resources/application.conf
index 77ce527..f0a7877 100644
--- a/core/controller/src/main/resources/application.conf
+++ b/core/controller/src/main/resources/application.conf
@@ -73,11 +73,16 @@ akka {
   }
 }
 
-ssl-config.enabledCipherSuites = [
-  "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA",
-  "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256",
-  "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256",
-]
+ssl-config {
+  enabledCipherSuites = [
+"TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA",
+"TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256",
+"TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256",
+  ]
+  enabledProtocols = [
+"TLSv1.2"
+  ]
+}
 
 whisk{
   # tracing configuration
@@ -85,6 +90,3 @@ whisk{
 component = "Controller"
   }
 }
-
-
-



[incubator-openwhisk] branch master updated: Recover image pulls by trying to run the container anyways. (#3813)

2018-07-24 Thread cbickel
This is an automated email from the ASF dual-hosted git repository.

cbickel pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-openwhisk.git


The following commit(s) were added to refs/heads/master by this push:
 new 8b5abe7  Recover image pulls by trying to run the container anyways. 
(#3813)
8b5abe7 is described below

commit 8b5abe7bbeb4464af586c1993fc3590e0fe8d516
Author: Markus Thömmes 
AuthorDate: Tue Jul 24 11:29:43 2018 +0200

Recover image pulls by trying to run the container anyways. (#3813)

A `docker pull` can fail due to various reasons. One of them is network 
throttling by the image registry. Since we try to pull on each blackbox 
invocation, high volume load can cause lots of errors due to failing pulls 
unnecessarily.

Instead, we try to run the container even if the pull failed in the first 
place. If the image is available locally, the container will start just fine 
and recover the error gracefully. If the image is not available locally, the 
run will fail as well and return the same error as the docker pull would've 
returned.

This behavior will only be enabled for blackbox actions that specify a tag. 
Blackbox actions not using a tag *or* using "latest" as a tag will exhibit the 
very same behavior as today. That is: There will always be a pull before each 
container start and a failing pull will result in an error reported to the 
user. This is to enable rapid prototyping on images and enable determinism in 
the workflow. Updating the action will then force a pull and will fail early if 
that pull fails. With t [...]

For production workload it is considered best-practice to version images 
through labels, thus we can "safely" assume that we can fall back to a local 
image in case the pull fails.
---
 .../containerpool/docker/DockerContainer.scala | 48 +++
 .../docker/DockerContainerFactory.scala|  9 +-
 docs/actions-docker.md | 10 +++
 .../docker/test/DockerContainerTests.scala | 98 +++---
 4 files changed, 112 insertions(+), 53 deletions(-)

diff --git 
a/core/invoker/src/main/scala/whisk/core/containerpool/docker/DockerContainer.scala
 
b/core/invoker/src/main/scala/whisk/core/containerpool/docker/DockerContainer.scala
index 5c959de..499ae42 100644
--- 
a/core/invoker/src/main/scala/whisk/core/containerpool/docker/DockerContainer.scala
+++ 
b/core/invoker/src/main/scala/whisk/core/containerpool/docker/DockerContainer.scala
@@ -39,6 +39,7 @@ import akka.stream.stage._
 import akka.util.ByteString
 import spray.json._
 import whisk.core.containerpool.logging.LogLine
+import whisk.core.entity.ExecManifest.ImageName
 import whisk.http.Messages
 
 object DockerContainer {
@@ -54,9 +55,7 @@ object DockerContainer {
* Creates a container running on a docker daemon.
*
* @param transid transaction creating the container
-   * @param image image to create the container from
-   * @param userProvidedImage whether the image is provided by the user
-   * or is an OpenWhisk provided image
+   * @param image either a user provided (Left) or OpenWhisk provided (Right) 
image
* @param memory memorylimit of the container
* @param cpuShares sharefactor for the container
* @param environment environment variables to set on the container
@@ -67,8 +66,7 @@ object DockerContainer {
* @return a Future which either completes with a DockerContainer or one of 
two specific failures
*/
   def create(transid: TransactionId,
- image: String,
- userProvidedImage: Boolean = false,
+ image: Either[ImageName, String],
  memory: ByteSize = 256.MB,
  cpuShares: Int = 0,
  environment: Map[String, String] = Map.empty,
@@ -104,22 +102,44 @@ object DockerContainer {
   dnsServers.flatMap(d => Seq("--dns", d)) ++
   name.map(n => Seq("--name", n)).getOrElse(Seq.empty) ++
   params
-val pulled = if (userProvidedImage) {
-  docker.pull(image).recoverWith {
-case _ => 
Future.failed(BlackboxStartupError(Messages.imagePullError(image)))
-  }
-} else Future.successful(())
+
+val imageToUse = image.fold(_.publicImageName, identity)
+
+val pulled = image match {
+  case Left(userProvided) if userProvided.tag.map(_ == 
"latest").getOrElse(true) =>
+// Iff the image tag is "latest" explicitly (or implicitly because no 
tag is given at all), failing to pull will
+// fail the whole container bringup process, because it is expected to 
pick up the very latest "untagged"
+// version every time.
+docker.pull(imageToUse).map(_ => true).recoverWith {
+  case _ => 
Future.failed(BlackboxStartupError(Messages.imagePullError(imageToUse)))
+}
+  case Left(_) =>
+// Iff

[incubator-openwhisk] branch master updated: Throttle the system based on active-ack timeouts. (#3875)

2018-07-26 Thread cbickel
This is an automated email from the ASF dual-hosted git repository.

cbickel pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-openwhisk.git


The following commit(s) were added to refs/heads/master by this push:
 new 9dd34f2  Throttle the system based on active-ack timeouts. (#3875)
9dd34f2 is described below

commit 9dd34f2f7f82a52d8e2400559781626bda8b8d02
Author: Markus Thömmes 
AuthorDate: Thu Jul 26 13:45:16 2018 +0200

Throttle the system based on active-ack timeouts. (#3875)

Today, we have an arbitrary system-wide limit of maximum concurrent 
connections. In general that is fine, but it doesn't have a direct correlation 
to what's actually happening in the system.

This adds a new state to each monitored invoker: Overloaded. An invoker 
will go into overloaded state if active-acks are starting to timeout. 
Eventually, if the system is really overloaded, all Invokers will be in 
overloaded state which will cause the loadbalancer to return a failure. This 
failure now results in a 503 - System overloaded message back to the user.
---
 ansible/README.md  |   2 -
 ansible/group_vars/all |   1 -
 ansible/roles/controller/tasks/deploy.yml  |   2 -
 ansible/templates/whisk.properties.j2  |   1 -
 .../src/main/scala/whisk/common/Logging.scala  |   4 +-
 .../src/main/scala/whisk/common/RingBuffer.scala   |   2 +-
 .../src/main/scala/whisk/core/WhiskConfig.scala|   2 -
 .../main/scala/whisk/core/controller/Actions.scala |   4 +
 .../scala/whisk/core/controller/Controller.scala   |   4 +-
 .../scala/whisk/core/controller/WebActions.scala   |   5 +
 .../core/entitlement/ActivationThrottler.scala |  19 +--
 .../scala/whisk/core/entitlement/Entitlement.scala |  29 +---
 .../core/loadBalancer/InvokerSupervision.scala | 176 -
 .../ShardingContainerPoolBalancer.scala|  49 --
 tests/performance/preparation/deploy.sh|   2 +-
 .../test/InvokerSupervisionTests.scala |  67 ++--
 .../test/ShardingContainerPoolBalancerTests.scala  |   3 +-
 17 files changed, 211 insertions(+), 161 deletions(-)

diff --git a/ansible/README.md b/ansible/README.md
index f4f147c..ec7d086 100644
--- a/ansible/README.md
+++ b/ansible/README.md
@@ -348,12 +348,10 @@ The default system throttling limits are configured in 
this file [./group_vars/a
 limits:
   invocationsPerMinute: "{{ limit_invocations_per_minute | default(60) }}"
   concurrentInvocations: "{{ limit_invocations_concurrent | default(30) }}"
-  concurrentInvocationsSystem:  "{{ limit_invocations_concurrent_system | 
default(5000) }}"
   firesPerMinute: "{{ limit_fires_per_minute | default(60) }}"
   sequenceMaxLength: "{{ limit_sequence_max_length | default(50) }}"
 ```
 - The `limits.invocationsPerMinute` represents the allowed namespace action 
invocations per minute.
 - The `limits.concurrentInvocations` represents the maximum concurrent 
invocations allowed per namespace.
-- The `limits.concurrentInvocationsSystem` represents the maximum concurrent 
invocations the system will allow across all namespaces.
 - The `limits.firesPerMinute` represents the allowed namespace trigger firings 
per minute.
 - The `limits.sequenceMaxLength` represents the maximum length of a sequence 
action.
diff --git a/ansible/group_vars/all b/ansible/group_vars/all
index aa32ede..2114630 100644
--- a/ansible/group_vars/all
+++ b/ansible/group_vars/all
@@ -53,7 +53,6 @@ runtimesManifest: "{{ runtimes_manifest | 
default(lookup('file', openwhisk_home
 limits:
   invocationsPerMinute: "{{ limit_invocations_per_minute | default(60) }}"
   concurrentInvocations: "{{ limit_invocations_concurrent | default(30) }}"
-  concurrentInvocationsSystem:  "{{ limit_invocations_concurrent_system | 
default(5000) }}"
   firesPerMinute: "{{ limit_fires_per_minute | default(60) }}"
   sequenceMaxLength: "{{ limit_sequence_max_length | default(50) }}"
 
diff --git a/ansible/roles/controller/tasks/deploy.yml 
b/ansible/roles/controller/tasks/deploy.yml
index 92c576d..11d7269 100644
--- a/ansible/roles/controller/tasks/deploy.yml
+++ b/ansible/roles/controller/tasks/deploy.yml
@@ -178,8 +178,6 @@
 
   "LIMITS_ACTIONS_INVOKES_PERMINUTE": "{{ limits.invocationsPerMinute }}"
   "LIMITS_ACTIONS_INVOKES_CONCURRENT": "{{ limits.concurrentInvocations }}"
-  "LIMITS_ACTIONS_INVOKES_CONCURRENTINSYSTEM":
-"{{ limits.concurrentInvocationsSystem }}"
   "LIMITS_TRIGGERS_FIRES_PERMINUTE": "{{ limits.firesPerMinute }}"
   "LIMITS_ACTIONS_SEQUENCE_MAXLENGTH": "{{ limits.sequenceMaxLength }}"
 
diff --git a/ansible/templates/whisk.properties.j2

[incubator-openwhisk] branch master updated: Log possible errors around creation of kafka clients. (#3972)

2018-08-17 Thread cbickel
This is an automated email from the ASF dual-hosted git repository.

cbickel pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-openwhisk.git


The following commit(s) were added to refs/heads/master by this push:
 new 0313730  Log possible errors around creation of kafka clients. (#3972)
0313730 is described below

commit 03137300589c295ba360963f84982e8bd80f30e6
Author: Markus Thömmes 
AuthorDate: Fri Aug 17 09:41:55 2018 +0200

Log possible errors around creation of kafka clients. (#3972)
---
 .../connector/kafka/KafkaConsumerConnector.scala   | 29 ++--
 .../connector/kafka/KafkaProducerConnector.scala   | 10 ++--
 .../src/main/scala/whisk/utils/Exceptions.scala| 54 ++
 3 files changed, 75 insertions(+), 18 deletions(-)

diff --git 
a/common/scala/src/main/scala/whisk/connector/kafka/KafkaConsumerConnector.scala
 
b/common/scala/src/main/scala/whisk/connector/kafka/KafkaConsumerConnector.scala
index 1f2ea2b..0891cee 100644
--- 
a/common/scala/src/main/scala/whisk/connector/kafka/KafkaConsumerConnector.scala
+++ 
b/common/scala/src/main/scala/whisk/connector/kafka/KafkaConsumerConnector.scala
@@ -27,6 +27,7 @@ import whisk.common.{Logging, LoggingMarkers, MetricEmitter, 
Scheduler}
 import whisk.connector.kafka.KafkaConfiguration._
 import whisk.core.ConfigKeys
 import whisk.core.connector.MessageConsumer
+import whisk.utils.Exceptions
 import whisk.utils.TimeHelpers._
 
 import scala.collection.JavaConverters._
@@ -41,7 +42,8 @@ class KafkaConsumerConnector(
   groupid: String,
   topic: String,
   override val maxPeek: Int = Int.MaxValue)(implicit logging: Logging, 
actorSystem: ActorSystem)
-extends MessageConsumer {
+extends MessageConsumer
+with Exceptions {
 
   implicit val ec: ExecutionContext = actorSystem.dispatcher
   private val gracefulWaitTime = 100.milliseconds
@@ -148,28 +150,27 @@ class KafkaConsumerConnector(
 
 verifyConfig(config, ConsumerConfig.configNames().asScala.toSet)
 
-val consumer = new KafkaConsumer(config, new ByteArrayDeserializer, new 
ByteArrayDeserializer)
+val consumer = tryAndThrow(s"creating consumer for $topic") {
+  new KafkaConsumer(config, new ByteArrayDeserializer, new 
ByteArrayDeserializer)
+}
 
 // subscribe does not need to be synchronized, because the reference to 
the consumer hasn't been returned yet and
 // thus this is guaranteed only to be called by the calling thread.
-consumer.subscribe(Seq(topic).asJavaCollection)
+tryAndThrow(s"subscribing to 
$topic")(consumer.subscribe(Seq(topic).asJavaCollection))
+
 consumer
   }
 
   private def recreateConsumer(): Unit = synchronized {
 logging.info(this, s"recreating consumer for '$topic'")
-try {
-  consumer.close()
-} catch {
-  // According to documentation, the consumer is force closed if it cannot 
be closed gracefully.
-  // See 
https://kafka.apache.org/11/javadoc/index.html?org/apache/kafka/clients/consumer/KafkaConsumer.html
-  //
-  // For the moment, we have no special handling of 'InterruptException' - 
it may be possible or even
-  // needed to re-try the 'close()' when being interrupted.
-  case t: Throwable =>
-logging.error(this, s"failed to close old consumer while recreating: 
$t")
-}
+// According to documentation, the consumer is force closed if it cannot 
be closed gracefully.
+// See 
https://kafka.apache.org/11/javadoc/index.html?org/apache/kafka/clients/consumer/KafkaConsumer.html
+//
+// For the moment, we have no special handling of 'InterruptException' - 
it may be possible or even
+// needed to re-try the 'close()' when being interrupted.
+tryAndSwallow("closing old consumer")(consumer.close())
 logging.info(this, s"old consumer closed for '$topic'")
+
 consumer = createConsumer(topic)
   }
 
diff --git 
a/common/scala/src/main/scala/whisk/connector/kafka/KafkaProducerConnector.scala
 
b/common/scala/src/main/scala/whisk/connector/kafka/KafkaProducerConnector.scala
index aea6b3c..692a149 100644
--- 
a/common/scala/src/main/scala/whisk/connector/kafka/KafkaProducerConnector.scala
+++ 
b/common/scala/src/main/scala/whisk/connector/kafka/KafkaProducerConnector.scala
@@ -28,6 +28,7 @@ import whisk.connector.kafka.KafkaConfiguration._
 import whisk.core.ConfigKeys
 import whisk.core.connector.{Message, MessageProducer}
 import whisk.core.entity.UUIDs
+import whisk.utils.Exceptions
 
 import scala.collection.JavaConverters._
 import scala.concurrent.duration._
@@ -36,7 +37,8 @@ import scala.util.{Failure, Success}
 
 class KafkaProducerConnector(kafkahosts: String, id: String = 
UUIDs.randomUUID().toString)(implicit logging: Logging,

actorSystem: ActorS

[incubator-openwhisk] branch master updated (2b3f586 -> e054131)

2018-09-20 Thread cbickel
This is an automated email from the ASF dual-hosted git repository.

cbickel pushed a change to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-openwhisk.git.


from 2b3f586  Add User-Agent to list of allowed CORS headers. (#4010)
 add e054131  Enable passing absolute url to gatling test and few other 
fixes. (#4032)

No new revisions were added by this update.

Summary of changes:
 .../gatling_tests/src/gatling/resources/conf/logback.xml   |  2 +-
 .../scala/extension/whisk/OpenWhiskProtocolBuilder.scala   | 14 ++
 2 files changed, 15 insertions(+), 1 deletion(-)



[incubator-openwhisk] branch master updated: Customize invoker user memory. (#4011)

2018-10-09 Thread cbickel
This is an automated email from the ASF dual-hosted git repository.

cbickel pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-openwhisk.git


The following commit(s) were added to refs/heads/master by this push:
 new 57a38de  Customize invoker user memory. (#4011)
57a38de is described below

commit 57a38de7a6b23bc4c42f7e777a4cb2da477a355f
Author: ningyougang <415622...@qq.com>
AuthorDate: Tue Oct 9 16:21:01 2018 +0800

Customize invoker user memory. (#4011)

Currently, all invokers share the same value from invoker.user-memory,
but in some cases, some invoker machines have big memory, some invoker
machines may have low memory, so it is necessary to customize invoker
user-memory.
---
 ansible/roles/controller/tasks/deploy.yml  |   2 -
 ansible/roles/invoker/tasks/deploy.yml |   2 +-
 .../main/scala/whisk/core/entity/InstanceId.scala  |   6 +-
 .../src/main/scala/whisk/core/entity/Size.scala|   2 +-
 core/controller/src/main/resources/reference.conf  |   1 -
 .../core/loadBalancer/InvokerSupervision.scala |   5 +-
 .../ShardingContainerPoolBalancer.scala|  47 ---
 .../main/scala/whisk/core/invoker/Invoker.scala|  12 +-
 .../connector/tests/CompletionMessageTests.scala   |  17 ++-
 .../docker/test/DockerContainerFactoryTests.scala  |   7 +-
 .../containerpool/test/ContainerProxyTests.scala   | 138 ++---
 .../test/InvokerSupervisionTests.scala |  70 +++
 .../test/ShardingContainerPoolBalancerTests.scala  |  44 ---
 13 files changed, 256 insertions(+), 97 deletions(-)

diff --git a/ansible/roles/controller/tasks/deploy.yml 
b/ansible/roles/controller/tasks/deploy.yml
index de2d71f..a4cc836 100644
--- a/ansible/roles/controller/tasks/deploy.yml
+++ b/ansible/roles/controller/tasks/deploy.yml
@@ -214,8 +214,6 @@
 "{{ controller.ssl.storeFlavor }}"
   "CONFIG_whisk_controller_https_clientAuth":
 "{{ controller.ssl.clientAuth }}"
-  "CONFIG_whisk_loadbalancer_invokerUserMemory":
-"{{ invoker.userMemory }}"
   "CONFIG_whisk_loadbalancer_blackboxFraction":
 "{{ controller.blackboxFraction }}"
   "CONFIG_whisk_loadbalancer_timeoutFactor":
diff --git a/ansible/roles/invoker/tasks/deploy.yml 
b/ansible/roles/invoker/tasks/deploy.yml
index 145febf..6712c38 100644
--- a/ansible/roles/invoker/tasks/deploy.yml
+++ b/ansible/roles/invoker/tasks/deploy.yml
@@ -217,7 +217,7 @@
   "CONFIG_whisk_runtimes_localImagePrefix": "{{ 
runtimes_local_image_prefix | default() }}"
   "CONFIG_whisk_containerFactory_containerArgs_network": "{{ 
invoker_container_network_name | default('bridge') }}"
   "INVOKER_CONTAINER_POLICY": "{{ invoker_container_policy_name | 
default()}}"
-  "CONFIG_whisk_containerPool_userMemory": "{{ invoker.userMemory }}"
+  "CONFIG_whisk_containerPool_userMemory": "{{ 
hostvars[groups['invokers'][invoker_index | int]].user_memory | 
default(invoker.userMemory) }}"
   "CONFIG_whisk_docker_client_parallelRuns": "{{ invoker_parallel_runs | 
default() }}"
   "CONFIG_whisk_docker_containerFactory_useRunc": "{{ invoker.useRunc }}"
   "WHISK_LOGS_DIR": "{{ whisk_logs_dir }}"
diff --git a/common/scala/src/main/scala/whisk/core/entity/InstanceId.scala 
b/common/scala/src/main/scala/whisk/core/entity/InstanceId.scala
index 5122980..0d477b4 100644
--- a/common/scala/src/main/scala/whisk/core/entity/InstanceId.scala
+++ b/common/scala/src/main/scala/whisk/core/entity/InstanceId.scala
@@ -30,7 +30,8 @@ import whisk.core.entity.ControllerInstanceId.MAX_NAME_LENGTH
  */
 case class InvokerInstanceId(val instance: Int,
  uniqueName: Option[String] = None,
- displayedName: Option[String] = None) {
+ displayedName: Option[String] = None,
+ val userMemory: ByteSize) {
   def toInt: Int = instance
 
   override def toString: String = (Seq("invoker" + instance) ++ uniqueName ++ 
displayedName).mkString("/")
@@ -43,7 +44,8 @@ case class ControllerInstanceId(val asString: String) {
 }
 
 object InvokerInstanceId extends DefaultJsonProtocol {
-  implicit val serdes = jsonFormat3(InvokerInstanceId.apply)
+  import whisk.core.entity.size.{serdes => xserds}
+  implicit val serdes = jsonFormat4(InvokerInstanceId.apply)
 }
 
 object ControllerInstanceId extends DefaultJsonProtocol {
diff --git a/common/scala/src/main/scala/whisk/core/entity/Size.scala 
b/common/scala/src/main/scala/whisk/core/entity/Size.scala
index 34f5bc6..9af4128 100644
--- a/common/scala/src/main/scala/whisk/core/entity/Size.scala
+++ b/commo

[incubator-openwhisk] branch master updated: Allow additional namespaces for subjects created in initdb.yml. (#3981)

2018-10-15 Thread cbickel
This is an automated email from the ASF dual-hosted git repository.

cbickel pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-openwhisk.git


The following commit(s) were added to refs/heads/master by this push:
 new 3edd205  Allow additional namespaces for subjects created in 
initdb.yml. (#3981)
3edd205 is described below

commit 3edd205b143633ca5714494d8cc039349a9e8bf2
Author: Vadim Raskin 
AuthorDate: Mon Oct 15 15:34:26 2018 +0200

Allow additional namespaces for subjects created in initdb.yml. (#3981)

* Allow additional namespaces for subjects created in initdb.yml

* Add extra namespaces before adding main namespace
---
 ansible/tasks/initdb.yml | 20 +++-
 1 file changed, 15 insertions(+), 5 deletions(-)

diff --git a/ansible/tasks/initdb.yml b/ansible/tasks/initdb.yml
index 16608ae..cc2831a 100644
--- a/ansible/tasks/initdb.yml
+++ b/ansible/tasks/initdb.yml
@@ -34,10 +34,20 @@
   {
 "_id": "{{ item }}",
 "subject": "{{ item }}",
-"namespaces": [{
-  "name": "{{ item }}",
-  "uuid": "{{ key.split(":")[0] }}",
-  "key": "{{ key.split(":")[1] }}"
-}]
+"namespaces": [
+{% if 'extraNamespaces' in db and item in db.extraNamespaces %}
+  {% for ns in db.extraNamespaces[item] %}
+  {
+"name": "{{ item }}{{ ns.postfix }}",
+"uuid": "{{ ns.uuid }}",
+"key": "{{ ns.key }}"
+  },
+  {% endfor %}
+{% endif %}
+  {
+"name": "{{ item }}",
+"uuid": "{{ key.split(":")[0] }}",
+"key": "{{ key.split(":")[1] }}"
+  }]
   }
   with_items: "{{ db.authkeys }}"



[incubator-openwhisk] branch master updated: Catch kafka producer exceptions and recreate the producer. (#4080)

2018-10-26 Thread cbickel
This is an automated email from the ASF dual-hosted git repository.

cbickel pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-openwhisk.git


The following commit(s) were added to refs/heads/master by this push:
 new 538517b  Catch kafka producer exceptions and recreate the producer. 
(#4080)
538517b is described below

commit 538517bbb5ba5aa1ffc7699864e9a2c53ee76561
Author: Vadim Raskin 
AuthorDate: Fri Oct 26 09:51:14 2018 +0200

Catch kafka producer exceptions and recreate the producer. (#4080)

* Catch producer exceptions and recreate the producer

* Replace try, plain failure/success with trying, remove dup error logging
---
 .../whisk/connector/kafka/KafkaProducerConnector.scala  | 17 +++--
 1 file changed, 11 insertions(+), 6 deletions(-)

diff --git 
a/common/scala/src/main/scala/whisk/connector/kafka/KafkaProducerConnector.scala
 
b/common/scala/src/main/scala/whisk/connector/kafka/KafkaProducerConnector.scala
index bda2a11..7af5c18 100644
--- 
a/common/scala/src/main/scala/whisk/connector/kafka/KafkaProducerConnector.scala
+++ 
b/common/scala/src/main/scala/whisk/connector/kafka/KafkaProducerConnector.scala
@@ -55,12 +55,17 @@ class KafkaProducerConnector(
 
 Future {
   blocking {
-producer.send(record, new Callback {
-  override def onCompletion(metadata: RecordMetadata, exception: 
Exception): Unit = {
-if (exception == null) produced.success(metadata)
-else produced.failure(exception)
-  }
-})
+try {
+  producer.send(record, new Callback {
+override def onCompletion(metadata: RecordMetadata, exception: 
Exception): Unit = {
+  if (exception == null) produced.trySuccess(metadata)
+  else produced.tryFailure(exception)
+}
+  })
+} catch {
+  case e: Throwable =>
+produced.tryFailure(e)
+}
   }
 }
 



[incubator-openwhisk] branch master updated: Fix broken CLI tests. (#4084)

2018-10-26 Thread cbickel
This is an automated email from the ASF dual-hosted git repository.

cbickel pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-openwhisk.git


The following commit(s) were added to refs/heads/master by this push:
 new 8851cab  Fix broken CLI tests. (#4084)
8851cab is described below

commit 8851cab741ad6cecce40a93275c4a2f3c1b82532
Author: Sven Lange-Last 
AuthorDate: Fri Oct 26 12:58:19 2018 +0200

Fix broken CLI tests. (#4084)

PR #3950 broke CLI tests in repo 
https://github.com/apache/incubator-openwhisk-cli because these tests override 
`val wsk` with a CLI specific implementation of type `Wsk` instead of 
`WskRestOperations`.

This change makes sure that `val wsk` has the common parent class 
`WskOperations`.
---
 tests/src/test/scala/system/basic/WskActionTests.scala | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/tests/src/test/scala/system/basic/WskActionTests.scala 
b/tests/src/test/scala/system/basic/WskActionTests.scala
index 7830da7..bec66b7 100644
--- a/tests/src/test/scala/system/basic/WskActionTests.scala
+++ b/tests/src/test/scala/system/basic/WskActionTests.scala
@@ -32,7 +32,9 @@ import spray.json.DefaultJsonProtocol._
 class WskActionTests extends TestHelpers with WskTestHelpers with JsHelpers 
with WskActorSystem {
 
   implicit val wskprops = WskProps()
-  val wsk = new WskRestOperations
+  // wsk must have type WskOperations so that tests using CLI (class Wsk)
+  // instead of REST (WskRestOperations) still work.
+  val wsk: WskOperations = new WskRestOperations
 
   val testString = "this is a test"
   val testResult = JsObject("count" -> testString.split(" ").length.toJson)



[incubator-openwhisk] branch master updated: Correctly recover from errors when fetching an action

2017-05-18 Thread cbickel
This is an automated email from the ASF dual-hosted git repository.

cbickel pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-openwhisk.git

The following commit(s) were added to refs/heads/master by this push:
   new  6130c5c   Correctly recover from errors when fetching an action
6130c5c is described below

commit 6130c5ce94b7990f809a7da84126d98abffb02ca
Author: Markus Thoemmes 
AuthorDate: Wed May 17 08:12:36 2017 +0200

Correctly recover from errors when fetching an action

If the invoker fails to fetch an action from the database it needs to 
inform the ActivationFeed that it still has resources (it didn't consume a 
container after all).

Reporting of those errors needs some disambiguation, as an error on 
fetching the action could also be caused by the user (for example by 
concurrently deleting the action while invoking it). The InvokerHealth protocol 
would shut down the invoker, iff the activation was reported as a WHISK_ERROR. 
All errors but DocumentNotFound are considered WHISK_ERRORs though.

Also in this commit:
- Loglevel change from error to warn for a missing revision. The 
InvokerHealth protocol produces ERROR otherwise.
- Some documentation and restructuring of the ContainerPool's setup.
---
 .../src/main/scala/whisk/http/ErrorResponse.scala  | 14 ++-
 .../scala/whisk/core/invoker/InvokerReactive.scala | 29 ++
 2 files changed, 32 insertions(+), 11 deletions(-)

diff --git a/common/scala/src/main/scala/whisk/http/ErrorResponse.scala 
b/common/scala/src/main/scala/whisk/http/ErrorResponse.scala
index 98b2490..d89d749 100644
--- a/common/scala/src/main/scala/whisk/http/ErrorResponse.scala
+++ b/common/scala/src/main/scala/whisk/http/ErrorResponse.scala
@@ -155,6 +155,8 @@ object Messages {
 if (!init) "." else " during initialization."
 }
 }
+
+val actionRemovedWhileInvoking = "Action could not be found or may have 
been deleted."
 }
 
 /** Replaces rejections with Json object containing cause and transaction id. 
*/
@@ -171,13 +173,13 @@ object ErrorResponse extends Directives {
 } getOrElse None)
 }
 
- def terminate(status: StatusCode, error: Option[ErrorResponse] = None, 
asJson: Boolean = true)(implicit transid: TransactionId): StandardRoute = {
+def terminate(status: StatusCode, error: Option[ErrorResponse] = None, 
asJson: Boolean = true)(implicit transid: TransactionId): StandardRoute = {
 val errorResponse = error getOrElse response(status)
-if (asJson) {
-complete(status, errorResponse)
-} else {
-complete(status, s"${errorResponse.error} (code: 
${errorResponse.code})")
-}
+if (asJson) {
+complete(status, errorResponse)
+} else {
+complete(status, s"${errorResponse.error} (code: 
${errorResponse.code})")
+}
 }
 
 def response(status: StatusCode)(implicit transid: TransactionId): 
ErrorResponse = status match {
diff --git 
a/core/invoker/src/main/scala/whisk/core/invoker/InvokerReactive.scala 
b/core/invoker/src/main/scala/whisk/core/invoker/InvokerReactive.scala
index dc050ee..f6ab973 100644
--- a/core/invoker/src/main/scala/whisk/core/invoker/InvokerReactive.scala
+++ b/core/invoker/src/main/scala/whisk/core/invoker/InvokerReactive.scala
@@ -46,6 +46,10 @@ import whisk.core.dispatcher.MessageHandler
 import whisk.core.entity._
 import whisk.core.entity.ExecManifest.ImageName
 import whisk.core.entity.size._
+import whisk.core.dispatcher.ActivationFeed.ContainerReleased
+import whisk.core.containerpool.ContainerPool
+import whisk.core.database.NoDocumentException
+import whisk.http.Messages
 
 class InvokerReactive(
 config: WhiskConfig,
@@ -67,6 +71,7 @@ class InvokerReactive(
 val cleaning = docker.ps(Seq("name" -> 
"wsk_"))(TransactionId.invokerNanny).flatMap { containers =>
 val removals = containers.map { id =>
 runc.resume(id)(TransactionId.invokerNanny).recoverWith {
+// Ignore resume failures and try to remove anyway
 case _ => Future.successful(())
 }.flatMap {
 _ => docker.rm(id)(TransactionId.invokerNanny)
@@ -80,6 +85,7 @@ class InvokerReactive(
 cleanup()
 sys.addShutdownHook(cleanup())
 
+/** Factory used by the ContainerProxy to physically create a new 
container. */
 val containerFactory = (tid: TransactionId, name: String, actionImage: 
ImageName, userProvidedImage: Boolean, memory: ByteSize) => {
 val image = if (userProvidedImage) {
 actionImage.publicImageName
@@ -99,6 +105,7 @@ class InvokerReactive(
 name = Some(name))
 }
 
+/** Sends an active-ack. */
 val ack = (tid: TransactionId, activ

[incubator-openwhisk] branch master updated: Align ContainerProxy with settings of the old container pool

2017-05-18 Thread cbickel
This is an automated email from the ASF dual-hosted git repository.

cbickel pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-openwhisk.git

The following commit(s) were added to refs/heads/master by this push:
   new  aee7958   Align ContainerProxy with settings of the old container 
pool
aee7958 is described below

commit aee7958a7148ac75537afe70332945aa75a7ab06
Author: Markus Thoemmes 
AuthorDate: Thu May 18 08:15:10 2017 +0200

Align ContainerProxy with settings of the old container pool

In particular, the old pool kept containers around for 10 minutes. As the 
removal strategy does concurrent remove/create anyway, keeping containers around 
for longer shouldn't harm.

Also: Fix a leak with an uncaught message.
---
 .../src/main/scala/whisk/core/containerpool/ContainerProxy.scala | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git 
a/core/invoker/src/main/scala/whisk/core/containerpool/ContainerProxy.scala 
b/core/invoker/src/main/scala/whisk/core/containerpool/ContainerProxy.scala
index 36d2d74..025a448 100644
--- a/core/invoker/src/main/scala/whisk/core/containerpool/ContainerProxy.scala
+++ b/core/invoker/src/main/scala/whisk/core/containerpool/ContainerProxy.scala
@@ -90,7 +90,7 @@ class ContainerProxy(
 implicit val ec = context.system.dispatcher
 
 // The container is destroyed after this period of time
-val unusedTimeout = 30.seconds
+val unusedTimeout = 10.minutes
 
 // The container is not paused for this period of time
 // after an activation has finished successfully
@@ -241,7 +241,8 @@ class ContainerProxy(
 // Send the job back to the pool to be rescheduled
 context.parent ! job
 stay
-case Event(ContainerRemoved, _) => stop()
+case Event(ContainerRemoved, _)  => stop()
+case Event(_: FailureMessage, _) => stop()
 }
 
 // Unstash all messages stashed while in intermediate state

-- 
To stop receiving notification emails like this one, please contact
['"commits@openwhisk.apache.org" '].


[incubator-openwhisk] branch master updated: Install/upgrade six module before installing other dependencies (#2322)

2017-06-01 Thread cbickel
This is an automated email from the ASF dual-hosted git repository.

cbickel pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-openwhisk.git

The following commit(s) were added to refs/heads/master by this push:
   new  71cec8f   Install/upgrade six module before installing other 
dependencies (#2322)
71cec8f is described below

commit 71cec8f01af232dc396188be75bfe020a7d91028
Author: Markus Thömmes 
AuthorDate: Thu Jun 1 10:56:24 2017 +0200

Install/upgrade six module before installing other dependencies (#2322)

* Install/upgrade six module before installing other dependencies in travis

* Explicitly upgrade six in action images
---
 .travis.yml   | 4 ++--
 core/actionProxy/Dockerfile   | 2 +-
 core/python2Action/Dockerfile | 2 +-
 3 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/.travis.yml b/.travis.yml
index 684033f..4dfeb8d 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -23,8 +23,8 @@ notifications:
   secure: 
"nbPjG0Y+P6tbEcQ+Q3k3EYv46IhQQJvRpEr/090H7A349cdn3vJRxSVKn1CtcboN7A+grhSu0Dx7jS8BwlRcOt1iqFAEDMOyN+6QcFGvpAGv3P2vZ9ML/yArH+c7mZ6k2X+BKdpqwQhrsaCwq23OAqKlP00T+S+exSmP9y3F3M6xVgCFP1RRlBTJfJiNnSir2vblskiXtAxHsO9jz/4cCxNGtzxmGD5Lhm6dchgwe7wyUCyhK8RG1NXc/z9x0mFernvPQBJJCn36NoijqbFiji1o8BaVULJszs+Gkd7rQxAntMm2wUQYWLsDTFp+V1HzgkdRHx4GpfoeK+aufGXS1r+kqGpsnMYqAqCQxtbATR7oB7XKlDjmiyFUkHng1ODDoUHp9k01Ow7CvCpwURlVglAOqmwM/+HEbUEvzrA1e5XNTn5auOJ0YsvZ9/SKToXmoZhYBnel0IVTQc2jFpp6obQEGX/0k
 [...]
 
 before_install:
-  - pip install --upgrade pip setuptools
-  - pip3 install --upgrade pip setuptools
+  - pip install --upgrade pip setuptools six
+  - pip3 install --upgrade pip setuptools six
   - ./tools/travis/flake8.sh  # Check Python files for style and stop the 
build on syntax errors
 
 install:
diff --git a/core/actionProxy/Dockerfile b/core/actionProxy/Dockerfile
index 2e69465..6ec4ff4 100644
--- a/core/actionProxy/Dockerfile
+++ b/core/actionProxy/Dockerfile
@@ -7,7 +7,7 @@ RUN apk add --no-cache bash \
 bzip2-dev \
 gcc \
 libc-dev \
-  && pip install --upgrade pip setuptools \
+  && pip install --upgrade pip setuptools six \
   && pip install --no-cache-dir gevent==1.2.1 flask==0.12 \
   && apk del .build-deps
 
diff --git a/core/python2Action/Dockerfile b/core/python2Action/Dockerfile
index f05ca61..95ff655 100644
--- a/core/python2Action/Dockerfile
+++ b/core/python2Action/Dockerfile
@@ -15,7 +15,7 @@ RUN apk add --no-cache \
 python-dev
 
 # Install common modules for python
-RUN pip install --no-cache-dir --upgrade pip setuptools \
+RUN pip install --no-cache-dir --upgrade pip setuptools six \
  && pip install --no-cache-dir \
 gevent==1.1.2 \
 flask==0.11.1 \

-- 
To stop receiving notification emails like this one, please contact
['"commits@openwhisk.apache.org" '].


[incubator-openwhisk] branch master updated: Choose target invoker based on specific invoker load.

2017-06-21 Thread cbickel
This is an automated email from the ASF dual-hosted git repository.

cbickel pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-openwhisk.git


The following commit(s) were added to refs/heads/master by this push:
 new f25a8f1  Choose target invoker based on specific invoker load.
f25a8f1 is described below

commit f25a8f1f49e0086b4c9428665d15910d614e15fb
Author: Markus Thoemmes 
AuthorDate: Mon Jun 12 11:57:55 2017 +0200

Choose target invoker based on specific invoker load.

Currently, the loadbalancer advances from one Invoker to another after a 
fixed amount of invocations, which isn't aware of any load in the system 
causing suboptimal behavior.

This only advances away from the home invoker of an action (determined by 
hash) if that home invoker is "heavily" loaded. We advance further if the next 
chosen invoker is busy and so forth. If we arrive at the home invoker again, 
the system is completely loaded and we force schedule to the home invoker. Step 
sizes are determined by prime numbers and also chosen by hashing to prevent 
chasing behavior.
---
 ansible/group_vars/all |   1 +
 ansible/templates/whisk.properties.j2  |   2 +-
 .../src/main/scala/whisk/core/WhiskConfig.scala|   4 +-
 .../core/loadBalancer/LoadBalancerService.scala| 119 +++-
 .../test/LoadBalancerServiceObjectTests.scala  | 125 +
 5 files changed, 219 insertions(+), 32 deletions(-)

diff --git a/ansible/group_vars/all b/ansible/group_vars/all
index 59ef946..4e42368 100644
--- a/ansible/group_vars/all
+++ b/ansible/group_vars/all
@@ -124,6 +124,7 @@ invoker:
   arguments: "{{ invoker_arguments | default('') }}"
   numcore: 2
   coreshare: 2
+  busyThreshold: "{{ invoker_busy_threshold | default(16) }}"
   serializeDockerOp: true
   serializeDockerPull: true
   useRunc: false
diff --git a/ansible/templates/whisk.properties.j2 
b/ansible/templates/whisk.properties.j2
index 05e5f76..5ee84f7 100644
--- a/ansible/templates/whisk.properties.j2
+++ b/ansible/templates/whisk.properties.j2
@@ -107,4 +107,4 @@ apigw.auth.pwd={{apigw_auth_pwd}}
 apigw.host={{apigw_host}}
 apigw.host.v2={{apigw_host_v2}}
 
-loadbalancer.activationCountBeforeNextInvoker={{ 
loadbalancer_activation_count_before_next_invoker | default(10) }}
+loadbalancer.invokerBusyThreshold={{ invoker.busyThreshold }}
diff --git a/common/scala/src/main/scala/whisk/core/WhiskConfig.scala 
b/common/scala/src/main/scala/whisk/core/WhiskConfig.scala
index 7fa2c94..267ad38 100644
--- a/common/scala/src/main/scala/whisk/core/WhiskConfig.scala
+++ b/common/scala/src/main/scala/whisk/core/WhiskConfig.scala
@@ -83,7 +83,7 @@ class WhiskConfig(
 val wskApiHost = this(WhiskConfig.wskApiProtocol) + "://" + 
this(WhiskConfig.wskApiHostname) + ":" + this(WhiskConfig.wskApiPort)
 val controllerHost = this(WhiskConfig.controllerHostName) + ":" + 
this(WhiskConfig.controllerHostPort)
 val controllerBlackboxFraction = 
this.getAsDouble(WhiskConfig.controllerBlackboxFraction, 0.10)
-val loadbalancerActivationCountBeforeNextInvoker = 
this.getAsInt(WhiskConfig.loadbalancerActivationCountBeforeNextInvoker, 10)
+val loadbalancerInvokerBusyThreshold = 
this.getAsInt(WhiskConfig.loadbalancerInvokerBusyThreshold, 16)
 
 val edgeHost = this(WhiskConfig.edgeHostName) + ":" + 
this(WhiskConfig.edgeHostApiPort)
 val kafkaHost = this(WhiskConfig.kafkaHostName) + ":" + 
this(WhiskConfig.kafkaHostPort)
@@ -251,7 +251,7 @@ object WhiskConfig {
 private val controllerHostPort = "controller.host.port"
 private val controllerBlackboxFraction = "controller.blackboxFraction"
 
-val loadbalancerActivationCountBeforeNextInvoker = 
"loadbalancer.activationCountBeforeNextInvoker"
+val loadbalancerInvokerBusyThreshold = "loadbalancer.invokerBusyThreshold"
 
 val kafkaHostName = "kafka.host"
 val loadbalancerHostName = "loadbalancer.host"
diff --git 
a/core/controller/src/main/scala/whisk/core/loadBalancer/LoadBalancerService.scala
 
b/core/controller/src/main/scala/whisk/core/loadBalancer/LoadBalancerService.scala
index 201826f..7e633c5 100644
--- 
a/core/controller/src/main/scala/whisk/core/loadBalancer/LoadBalancerService.scala
+++ 
b/core/controller/src/main/scala/whisk/core/loadBalancer/LoadBalancerService.scala
@@ -20,7 +20,6 @@ package whisk.core.loadBalancer
 import java.nio.charset.StandardCharsets
 
 import java.time.{ Clock, Instant }
-import java.util.concurrent.atomic.AtomicInteger
 
 import scala.collection.concurrent.TrieMap
 import scala.concurrent.Await
@@ -46,13 +45,14 @@ import whisk.common.TransactionId
 import whisk.connector.kafka.KafkaConsumerConnector
 import whisk.connector.kafka.KafkaProducerConnector
 import whisk.core.Whi

[incubator-openwhisk] branch master updated (e9d5c50 -> c9768ea)

2017-06-22 Thread cbickel
This is an automated email from the ASF dual-hosted git repository.

cbickel pushed a change to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-openwhisk.git.


from e9d5c50  Fixed typo (#2407)
 new f79ae51  Keep inactive containers around even in a fully loaded system.
 new 85e8c8f  Retype scheduling code to get rid of unsafeness.
 new c9768ea  Adjust pause grace to a less dangerous value

The 3 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.


Summary of changes:
 .../whisk/core/containerpool/ContainerPool.scala   | 146 +++--
 .../whisk/core/containerpool/ContainerProxy.scala  |  17 ++-
 .../scala/whisk/core/invoker/InvokerReactive.scala |   1 +
 .../containerpool/test/ContainerPoolTests.scala| 123 +
 .../containerpool/test/ContainerProxyTests.scala   |  24 ++--
 5 files changed, 157 insertions(+), 154 deletions(-)

-- 
To stop receiving notification emails like this one, please contact
['"commits@openwhisk.apache.org" '].


[incubator-openwhisk] 02/03: Retype scheduling code to get rid of unsafeness.

2017-06-22 Thread cbickel
This is an automated email from the ASF dual-hosted git repository.

cbickel pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-openwhisk.git

commit 85e8c8f1d49dac3c04fe0f58480d652f52729c54
Author: Markus Thoemmes 
AuthorDate: Tue Jun 6 14:38:18 2017 +0200

Retype scheduling code to get rid of unsafeness.

The pool used to look up the data of a container it has just chosen/created, 
which is unnecessary. Also removed a non-reachable error condition.

Also: Take out unnecessary prewarmConfig checking.
---
 .../whisk/core/containerpool/ContainerPool.scala   | 64 +++---
 .../containerpool/test/ContainerPoolTests.scala|  6 +-
 2 files changed, 34 insertions(+), 36 deletions(-)

diff --git 
a/core/invoker/src/main/scala/whisk/core/containerpool/ContainerPool.scala 
b/core/invoker/src/main/scala/whisk/core/containerpool/ContainerPool.scala
index 012373f..9a796ee 100644
--- a/core/invoker/src/main/scala/whisk/core/containerpool/ContainerPool.scala
+++ b/core/invoker/src/main/scala/whisk/core/containerpool/ContainerPool.scala
@@ -100,17 +100,12 @@ class ContainerPool(
 } else None
 
 container match {
-case Some(actor) =>
-freePool.get(actor) match {
-case Some(data) =>
-busyPool.update(actor, data)
-freePool.remove(actor)
-actor ! r // forwards the run request to the 
container
-case None =>
-logging.error(this, "actor data not found")
-self ! r
-}
-case None => self ! r
+case Some((actor, data)) =>
+busyPool.update(actor, data)
+freePool.remove(actor)
+actor ! r // forwards the run request to the container
+case None =>
+self ! r
 }
 
 // Container is free to take more work
@@ -133,15 +128,17 @@ class ContainerPool(
 }
 
 /** Creates a new container and updates state accordingly. */
-def createContainer() = {
+def createContainer(): (ActorRef, ContainerData) = {
 val ref = childFactory(context)
-freePool.update(ref, NoData())
-ref
+val data = NoData()
+freePool.update(ref, data)
+
+(ref, data)
 }
 
 /** Creates a new prewarmed container */
 def prewarmContainer(exec: CodeExec[_], memoryLimit: ByteSize) =
-prewarmConfig.foreach(config => childFactory(context) ! Start(exec, 
memoryLimit))
+childFactory(context) ! Start(exec, memoryLimit)
 
 /**
  * Takes a prewarm container out of the prewarmed pool
@@ -150,23 +147,24 @@ class ContainerPool(
  * @param kind the kind you want to invoke
  * @return the container iff found
  */
-def takePrewarmContainer(action: ExecutableWhiskAction) = 
prewarmConfig.flatMap { config =>
-val kind = action.exec.kind
-val memory = action.limits.memory.megabytes.MB
-prewarmedPool.find {
-case (_, PreWarmedData(_, `kind`, `memory`)) => true
-case _   => false
-}.map {
-case (ref, data) =>
-// Move the container to the usual pool
-freePool.update(ref, data)
-prewarmedPool.remove(ref)
-// Create a new prewarm container
-prewarmContainer(config.exec, config.memoryLimit)
-
-ref
+def takePrewarmContainer(action: ExecutableWhiskAction): Option[(ActorRef, 
ContainerData)] =
+prewarmConfig.flatMap { config =>
+val kind = action.exec.kind
+val memory = action.limits.memory.megabytes.MB
+prewarmedPool.find {
+case (_, PreWarmedData(_, `kind`, `memory`)) => true
+case _   => false
+}.map {
+case (ref, data) =>
+// Move the container to the usual pool
+freePool.update(ref, data)
+prewarmedPool.remove(ref)
+// Create a new prewarm container
+prewarmContainer(config.exec, config.memoryLimit)
+
+(ref, data)
+}
 }
-}
 
 /** Removes a container and updates state accordingly. */
 def removeContainer(toDelete: ActorRef) = {
@@ -192,11 +190,11 @@ object ContainerPool {
  * @param idles a map of idle containers, awaiting work
  * @return a container if one found
  */
-def schedule[A](action: ExecutableWhiskAction, invocationNamespace: 
EntityName, idles: Map[A, ContainerData]): Option[A] = {
+def schedule[A](action: ExecutableWhiskAction, invoca

[incubator-openwhisk] 03/03: Adjust pause grace to a less dangerous value

2017-06-22 Thread cbickel
This is an automated email from the ASF dual-hosted git repository.

cbickel pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-openwhisk.git

commit c9768ea96e9561ee42f4a2b7795afa46cc6b6e0a
Author: Markus Thoemmes 
AuthorDate: Thu Jun 22 08:32:10 2017 +0200

Adjust pause grace to a less dangerous value
---
 .../whisk/core/containerpool/ContainerProxy.scala  | 17 ---
 .../containerpool/test/ContainerProxyTests.scala   | 24 +++---
 2 files changed, 20 insertions(+), 21 deletions(-)

diff --git 
a/core/invoker/src/main/scala/whisk/core/containerpool/ContainerProxy.scala 
b/core/invoker/src/main/scala/whisk/core/containerpool/ContainerProxy.scala
index 796624b..55c7999 100644
--- a/core/invoker/src/main/scala/whisk/core/containerpool/ContainerProxy.scala
+++ b/core/invoker/src/main/scala/whisk/core/containerpool/ContainerProxy.scala
@@ -91,21 +91,18 @@ case object ActivationCompleted
  * @param factory a function generating a Container
  * @param sendActiveAck a function sending the activation via active ack
  * @param storeActivation a function storing the activation in a persistent 
store
+ * @param unusedTimeout time after which the container is automatically thrown 
away
+ * @param pauseGrace time to wait for new work before pausing the container
  */
 class ContainerProxy(
 factory: (TransactionId, String, ImageName, Boolean, ByteSize) => 
Future[Container],
 sendActiveAck: (TransactionId, WhiskActivation) => Future[Any],
-storeActivation: (TransactionId, WhiskActivation) => Future[Any]) extends 
FSM[ContainerState, ContainerData] with Stash {
+storeActivation: (TransactionId, WhiskActivation) => Future[Any],
+unusedTimeout: FiniteDuration,
+pauseGrace: FiniteDuration) extends FSM[ContainerState, ContainerData] 
with Stash {
 implicit val ec = context.system.dispatcher
 val logging = new AkkaLogging(context.system.log)
 
-// The container is destroyed after this period of time
-val unusedTimeout = 10.minutes
-
-// The container is not paused for this period of time
-// after an activation has finished successfully
-val pauseGrace = 1.second
-
 startWith(Uninitialized, NoData())
 
 when(Uninitialized) {
@@ -384,7 +381,9 @@ class ContainerProxy(
 object ContainerProxy {
 def props(factory: (TransactionId, String, ImageName, Boolean, ByteSize) 
=> Future[Container],
   ack: (TransactionId, WhiskActivation) => Future[Any],
-  store: (TransactionId, WhiskActivation) => Future[Any]) = 
Props(new ContainerProxy(factory, ack, store))
+  store: (TransactionId, WhiskActivation) => Future[Any],
+  unusedTimeout: FiniteDuration = 10.minutes,
+  pauseGrace: FiniteDuration = 50.milliseconds) = Props(new 
ContainerProxy(factory, ack, store, unusedTimeout, pauseGrace))
 
 // Needs to be thread-safe as it's used by multiple proxies concurrently.
 private val containerCount = new Counter
diff --git 
a/tests/src/test/scala/whisk/core/containerpool/test/ContainerProxyTests.scala 
b/tests/src/test/scala/whisk/core/containerpool/test/ContainerProxyTests.scala
index a49c766..f7a7e12 100644
--- 
a/tests/src/test/scala/whisk/core/containerpool/test/ContainerProxyTests.scala
+++ 
b/tests/src/test/scala/whisk/core/containerpool/test/ContainerProxyTests.scala
@@ -148,7 +148,7 @@ class ContainerProxyTests extends 
TestKit(ActorSystem("ContainerProxys"))
 val container = new TestContainer
 val factory = createFactory(Future.successful(container))
 
-val machine = childActorOf(ContainerProxy.props(factory, createAcker, 
store))
+val machine = childActorOf(ContainerProxy.props(factory, createAcker, 
store, pauseGrace = timeout))
 registerCallback(machine)
 preWarm(machine)
 
@@ -164,7 +164,7 @@ class ContainerProxyTests extends 
TestKit(ActorSystem("ContainerProxys"))
 val factory = createFactory(Future.successful(container))
 val acker = createAcker
 
-val machine = childActorOf(ContainerProxy.props(factory, acker, store))
+val machine = childActorOf(ContainerProxy.props(factory, acker, store, 
pauseGrace = timeout))
 registerCallback(machine)
 
 preWarm(machine)
@@ -196,7 +196,7 @@ class ContainerProxyTests extends 
TestKit(ActorSystem("ContainerProxys"))
 val factory = createFactory(Future.successful(container))
 val acker = createAcker
 
-val machine = childActorOf(ContainerProxy.props(factory, acker, store))
+val machine = childActorOf(ContainerProxy.props(factory, acker, store, 
pauseGrace = timeout))
 registerCallback(machine)
 preWarm(machine)
 
@@ -220,7 +220,7 @@ class ContainerProxyTests extends 
TestKit(ActorSystem("ContainerProxys"))
 val factory = createFactory(Future.successful(container))
 

[incubator-openwhisk] 01/03: Keep inactive containers around even in a fully loaded system.

2017-06-22 Thread cbickel
This is an automated email from the ASF dual-hosted git repository.

cbickel pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-openwhisk.git

commit f79ae51ae46ce2cc19d164684c620081c7d3cc1c
Author: Markus Thoemmes 
AuthorDate: Mon Jun 5 11:08:05 2017 +0200

Keep inactive containers around even in a fully loaded system.

The ContainerPool will trash its whole pool of containers if running 
containers are exhausting the maximum size of the pool.

This implements a scheme where the ContainerPool distinguishes between "maximum 
containers that are allowed to do work" and "maximum containers in the pool at 
all", where the latter can be greater than the first. This will keep containers 
longer around allowing for a higher warm-container hit rate with heterogeneous 
load.
---
 .../whisk/core/containerpool/ContainerPool.scala   | 106 ++-
 .../scala/whisk/core/invoker/InvokerReactive.scala |   1 +
 .../containerpool/test/ContainerPoolTests.scala| 117 ++---
 3 files changed, 115 insertions(+), 109 deletions(-)

diff --git 
a/core/invoker/src/main/scala/whisk/core/containerpool/ContainerPool.scala 
b/core/invoker/src/main/scala/whisk/core/containerpool/ContainerPool.scala
index fca7a99..012373f 100644
--- a/core/invoker/src/main/scala/whisk/core/containerpool/ContainerPool.scala
+++ b/core/invoker/src/main/scala/whisk/core/containerpool/ContainerPool.scala
@@ -52,20 +52,23 @@ case class WorkerData(data: ContainerData, state: 
WorkerState)
  * Prewarm containers are only used, if they have matching arguments
  * (kind, memory) and there is space in the pool.
  *
- * @param childFactory method to create new container proxy actors
+ * @param childFactory method to create new container proxy actor
+ * @param maxActiveContainers maximum amount of containers doing work
  * @param maxPoolSize maximum size of containers allowed in the pool
  * @param feed actor to request more work from
  * @param prewarmConfig optional settings for container prewarming
  */
 class ContainerPool(
 childFactory: ActorRefFactory => ActorRef,
+maxActiveContainers: Int,
 maxPoolSize: Int,
 feed: ActorRef,
 prewarmConfig: Option[PrewarmingConfig] = None) extends Actor {
-val logging = new AkkaLogging(context.system.log)
+implicit val logging = new AkkaLogging(context.system.log)
 
-val pool = new mutable.HashMap[ActorRef, WorkerData]
-val prewarmedPool = new mutable.HashMap[ActorRef, WorkerData]
+val freePool = mutable.Map[ActorRef, ContainerData]()
+val busyPool = mutable.Map[ActorRef, ContainerData]()
+val prewarmedPool = mutable.Map[ActorRef, ContainerData]()
 
 prewarmConfig.foreach { config =>
 logging.info(this, s"pre-warming ${config.count} ${config.exec.kind} 
containers")
@@ -77,52 +80,62 @@ class ContainerPool(
 def receive: Receive = {
 // A job to run on a container
 case r: Run =>
-// Schedule a job to a warm container
-ContainerPool.schedule(r.action, r.msg.user.namespace, 
pool.toMap).orElse {
-// Create a cold container iff there's space in the pool
-if (pool.size < maxPoolSize) {
-takePrewarmContainer(r.action).orElse {
-Some(createContainer())
+val container = if (busyPool.size < maxActiveContainers) {
+// Schedule a job to a warm container
+ContainerPool.schedule(r.action, r.msg.user.namespace, 
freePool.toMap).orElse {
+if (busyPool.size + freePool.size < maxPoolSize) {
+takePrewarmContainer(r.action).orElse {
+Some(createContainer())
+}
+} else None
+}.orElse {
+// Remove a container and create a new one for the given 
job
+ContainerPool.remove(r.action, r.msg.user.namespace, 
freePool.toMap).map { toDelete =>
+removeContainer(toDelete)
+takePrewarmContainer(r.action).getOrElse {
+createContainer()
+}
 }
-} else None
-}.orElse {
-// Remove a container and create a new one for the given job
-ContainerPool.remove(r.action, r.msg.user.namespace, 
pool.toMap).map { toDelete =>
-removeContainer(toDelete)
-createContainer()
 }
-} match {
+} else None
+
+container match {
 case Some(actor) =>
-pool.get(actor) match {
-case Some(w) =>
-pool.update(actor, WorkerData(w.data, Busy))
+ 

[incubator-openwhisk] branch master updated: Action limit test for log truncation produces fewer log entries

2017-06-26 Thread cbickel
This is an automated email from the ASF dual-hosted git repository.

cbickel pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-openwhisk.git


The following commit(s) were added to refs/heads/master by this push:
 new 54203c7  Action limit test for log truncation produces fewer log entries
54203c7 is described below

commit 54203c71369c8c4377fb3b12113fecf4870d0f6e
Author: Sven Lange-Last 
AuthorDate: Fri Jun 23 13:42:05 2017 +0200

Action limit test for log truncation produces fewer log entries

The test now uses the minimum supported log limit (1M) and creates 10% more 
log output than the limit.
In the past, the test had a limit of 2M and produced 3M logs.

We frequently saw this test failing in mainOpenwhisk in the past few days 
because it took too
long to PUT the activation record to Cloudant, i.e. more than 60 sec. We 
suspect that
concurrent load tests in YS0 may have caused the long Cloudant response 
times because the
Cloudant DB cluster is shared. But we don't have enough diagnostic data to 
prove this assumption.

At the same time, making the log portion in the activation record smaller 
with this change
should also make the Cloudant request faster. This change does only affect 
limits and sizes but
does not change the test's design.
---
 tests/src/test/scala/whisk/core/limits/ActionLimitsTests.scala | 8 +---
 1 file changed, 5 insertions(+), 3 deletions(-)

diff --git a/tests/src/test/scala/whisk/core/limits/ActionLimitsTests.scala 
b/tests/src/test/scala/whisk/core/limits/ActionLimitsTests.scala
index ee43dbe..a125105 100644
--- a/tests/src/test/scala/whisk/core/limits/ActionLimitsTests.scala
+++ b/tests/src/test/scala/whisk/core/limits/ActionLimitsTests.scala
@@ -99,20 +99,22 @@ class ActionLimitsTests extends TestHelpers with 
WskTestHelpers {
 
 it should "succeed but truncate logs, if log size exceeds its limit" in 
withAssetCleaner(wskprops) {
 (wp, assetHelper) =>
-val allowedSize = 2 megabytes
+val bytesPerLine = 16
+val allowedSize = 1 megabytes
 val name = "TestActionCausingExceededLogs"
 assetHelper.withCleaner(wsk.action, name, confirmDelete = true) {
 val actionName = TestUtils.getTestActionFilename("dosLogs.js")
 (action, _) => action.create(name, Some(actionName), logsize = 
Some(allowedSize))
 }
 
-val attemptedSize = allowedSize + 1.megabytes
+// Add 10% to allowed size to exceed limit
+val attemptedSize = (allowedSize.toBytes * 1.1).toLong.bytes
 
 val run = wsk.action.invoke(name, Map("payload" -> 
attemptedSize.toBytes.toJson))
 withActivation(wsk.activation, run) { response =>
 val lines = response.logs.get
 lines.last shouldBe Messages.truncateLogs(allowedSize)
-(lines.length - 1) shouldBe (allowedSize.toBytes / 16)
+(lines.length - 1) shouldBe (allowedSize.toBytes / 
bytesPerLine)
 // dropping 39 characters (timestamp + stream name)
 // then reform total string adding back newlines
 val actual = lines.dropRight(1).map(_.drop(39)).mkString("", 
"\n", "\n").sizeInBytes.toBytes

-- 
To stop receiving notification emails like this one, please contact
['"commits@openwhisk.apache.org" '].


[incubator-openwhisk] branch master updated: Back-off threshold and choose home invoker as default. (#2417)

2017-06-26 Thread cbickel
This is an automated email from the ASF dual-hosted git repository.

cbickel pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-openwhisk.git


The following commit(s) were added to refs/heads/master by this push:
 new 9bb2617  Back-off threshold and choose home invoker as default. (#2417)
9bb2617 is described below

commit 9bb26173e71e2c71667810970e900c291d5a8ac3
Author: Markus Thömmes 
AuthorDate: Mon Jun 26 17:33:09 2017 +0200

Back-off threshold and choose home invoker as default. (#2417)

* Back-off threshold and choose home invoker as default.

The loadbalancer falls back to picking the home invoker for the action iff 
all invokers are loaded above a defined threshold. That threshold is backed off 
3 times to prevent a "all hell breaks loose" scenario in a sustained high load.
---
 .../core/loadBalancer/LoadBalancerService.scala| 25 --
 .../test/LoadBalancerServiceObjectTests.scala  | 25 +-
 2 files changed, 34 insertions(+), 16 deletions(-)

diff --git 
a/core/controller/src/main/scala/whisk/core/loadBalancer/LoadBalancerService.scala
 
b/core/controller/src/main/scala/whisk/core/loadBalancer/LoadBalancerService.scala
index 5390a3a..d2e8b64 100644
--- 
a/core/controller/src/main/scala/whisk/core/loadBalancer/LoadBalancerService.scala
+++ 
b/core/controller/src/main/scala/whisk/core/loadBalancer/LoadBalancerService.scala
@@ -336,8 +336,8 @@ object LoadBalancerService {
 
 /**
  * Scans through all invokers and searches for an invoker, that has a 
queue length
- * below the defined threshold. Iff no "underloaded" invoker was found it 
will
- * default to the least loaded invoker in the list.
+ * below the defined threshold. The threshold is subject to a 3 times back 
off. Iff
+ * no "underloaded" invoker was found it will default to the home invoker.
  *
  * @param availableInvokers a list of available (healthy) invokers to 
search in
  * @param activationsPerInvoker a map of the number of outstanding 
activations per invoker
@@ -359,30 +359,33 @@ object LoadBalancerService {
 val step = stepSizes(hash % stepSizes.size)
 
 @tailrec
-def search(targetInvoker: Int, seenInvokers: Int): A = {
+def search(targetInvoker: Int, iteration: Int = 1): A = {
 // map the computed index to the actual invoker index
 val invokerName = availableInvokers(targetInvoker)
 
 // send the request to the target invoker if it has capacity...
-if (activationsPerInvoker.get(invokerName).getOrElse(0) < 
invokerBusyThreshold) {
+if (activationsPerInvoker.get(invokerName).getOrElse(0) < 
invokerBusyThreshold * iteration) {
 invokerName
 } else {
-// ... otherwise look for a less loaded invoker by 
stepping through a pre computed
+// ... otherwise look for a less loaded invoker by 
stepping through a pre-computed
 // list of invokers; there are two possible outcomes:
 // 1. the search lands on a new invoker that has capacity, 
choose it
 // 2. walked through the entire list and found no better 
invoker than the
-//"home invoker", choose the least loaded invoker
+//"home invoker", force the home invoker
 val newTarget = (targetInvoker + step) % numInvokers
-if (newTarget == homeInvoker || seenInvokers > 
numInvokers) {
-// fall back to the invoker with the least load.
-activationsPerInvoker.minBy(_._2)._1
+if (newTarget == homeInvoker) {
+if (iteration < 3) {
+search(newTarget, iteration + 1)
+} else {
+availableInvokers(homeInvoker)
+}
 } else {
-search(newTarget, seenInvokers + 1)
+search(newTarget, iteration)
 }
 }
 }
 
-Some(search(homeInvoker, 0))
+Some(search(homeInvoker))
 } else {
 None
 }
diff --git 
a/tests/src/test/scala/whisk/core/loadBalancer/test/LoadBalancerServiceObjectTests.scala
 
b/tests/src/test/scala/whisk/core/loadBalancer/test/LoadBalancerServiceObjectTests.scala
index 4f5433c..9b8bafc 100644
--- 
a/tests/src/test/scala/whisk/core/loadBalancer/test/LoadBalancerServiceObjectTests.scala
+++ 
b/tests/src/test/scala/whisk/core/loadBalancer/test/LoadBalancerServiceObjectTests.scala
@@ -112,15 +112,30 @@ class LoadBalancerServiceObjectTests extends FlatSpec 
with Matchers {
 1, hash) shou

[incubator-openwhisk] branch master updated: Start scala components with JMX enabled locally

2017-06-27 Thread cbickel
This is an automated email from the ASF dual-hosted git repository.

cbickel pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-openwhisk.git


The following commit(s) were added to refs/heads/master by this push:
 new 2f7acdd  Start scala components with JMX enabled locally
2f7acdd is described below

commit 2f7acddcf8636d1d3efd1045d2b2f0d8ab18f6b8
Author: Markus Thoemmes 
AuthorDate: Mon May 15 16:10:43 2017 +0200

Start scala components with JMX enabled locally

Enables easier profiling of local components via JVM standard tooling.
---
 ansible/environments/local/group_vars/all | 3 +++
 ansible/environments/mac/group_vars/all   | 3 +++
 ansible/roles/invoker/tasks/deploy.yml| 2 +-
 3 files changed, 7 insertions(+), 1 deletion(-)

diff --git a/ansible/environments/local/group_vars/all 
b/ansible/environments/local/group_vars/all
index 27b8515..6c5de25 100755
--- a/ansible/environments/local/group_vars/all
+++ b/ansible/environments/local/group_vars/all
@@ -33,3 +33,6 @@ apigw_host_v2: "http://{{ groups['apigateway']|first 
}}:{{apigateway.port.api}}/
 # RunC enablement
 invoker_use_runc: true
 invoker_use_reactive_pool: false
+
+controller_arguments: '-Dcom.sun.management.jmxremote 
-Dcom.sun.management.jmxremote.ssl=false 
-Dcom.sun.management.jmxremote.authenticate=false 
-Dcom.sun.management.jmxremote.port=1098'
+invoker_arguments: "{{ controller_arguments }}"
\ No newline at end of file
diff --git a/ansible/environments/mac/group_vars/all 
b/ansible/environments/mac/group_vars/all
index 6d14d1d..051358f 100644
--- a/ansible/environments/mac/group_vars/all
+++ b/ansible/environments/mac/group_vars/all
@@ -38,3 +38,6 @@ apigw_host_v2: "http://{{ groups['apigateway']|first 
}}:{{apigateway.port.api}}/
 # RunC enablement
 invoker_use_runc: true
 invoker_use_reactive_pool: false
+
+controller_arguments: '-Dcom.sun.management.jmxremote 
-Dcom.sun.management.jmxremote.ssl=false 
-Dcom.sun.management.jmxremote.authenticate=false 
-Dcom.sun.management.jmxremote.port=1098'
+invoker_arguments: "{{ controller_arguments }}"
diff --git a/ansible/roles/invoker/tasks/deploy.yml 
b/ansible/roles/invoker/tasks/deploy.yml
index 199b1cc..090de13 100644
--- a/ansible/roles/invoker/tasks/deploy.yml
+++ b/ansible/roles/invoker/tasks/deploy.yml
@@ -66,7 +66,7 @@
 -e SERVICE_CHECK_TIMEOUT=2s
 -e SERVICE_CHECK_INTERVAL=15s
 -e JAVA_OPTS=-Xmx{{ invoker.heap }}
--e INVOKER_OPTS={{ invoker.arguments }}
+-e INVOKER_OPTS='{{ invoker.arguments }}'
 -v /sys/fs/cgroup:/sys/fs/cgroup
 -v /run/runc:/run/runc
 -v {{ whisk_logs_dir }}/invoker{{ 
groups['invokers'].index(inventory_hostname) }}:/logs

-- 
To stop receiving notification emails like this one, please contact
['"commits@openwhisk.apache.org" '].


[incubator-openwhisk] branch master updated: Deterministic container removal for test.

2017-06-28 Thread cbickel
This is an automated email from the ASF dual-hosted git repository.

cbickel pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-openwhisk.git


The following commit(s) were added to refs/heads/master by this push:
 new 5ca4e16  Deterministic container removal for test.
5ca4e16 is described below

commit 5ca4e167fc55b63ee4ed14dfd3cf1d0a613ac973
Author: Markus Thoemmes 
AuthorDate: Wed Jun 28 14:23:40 2017 +0200

Deterministic container removal for test.
---
 .../test/scala/whisk/core/containerpool/test/ContainerPoolTests.scala | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git 
a/tests/src/test/scala/whisk/core/containerpool/test/ContainerPoolTests.scala 
b/tests/src/test/scala/whisk/core/containerpool/test/ContainerPoolTests.scala
index d386597..f7bcdf1 100644
--- 
a/tests/src/test/scala/whisk/core/containerpool/test/ContainerPoolTests.scala
+++ 
b/tests/src/test/scala/whisk/core/containerpool/test/ContainerPoolTests.scala
@@ -175,14 +175,14 @@ class ContainerPoolTests extends 
TestKit(ActorSystem("ContainerPool"))
 // Run the first container
 pool ! runMessage
 containers(0).expectMsg(runMessage)
-containers(0).send(pool, NeedWork(warmedData()))
+containers(0).send(pool, NeedWork(warmedData(lastUsed = 
Instant.EPOCH)))
 containers(0).send(pool, ActivationCompleted)
 feed.expectMsg(ContainerReleased)
 
 // Run the second container, don't remove the first one
 pool ! runMessageDifferentEverything
 containers(1).expectMsg(runMessageDifferentEverything)
-containers(1).send(pool, NeedWork(warmedData()))
+containers(1).send(pool, NeedWork(warmedData(lastUsed = Instant.now)))
 containers(1).send(pool, ActivationCompleted)
 feed.expectMsg(ContainerReleased)
 

-- 
To stop receiving notification emails like this one, please contact
['"commits@openwhisk.apache.org" '].


[incubator-openwhisk] branch master updated: Remove ContainerPool test which is no longer applicable.

2017-07-19 Thread cbickel
This is an automated email from the ASF dual-hosted git repository.

cbickel pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-openwhisk.git


The following commit(s) were added to refs/heads/master by this push:
 new 742ab2d  Remove ContainerPool test which is no longer applicable.
742ab2d is described below

commit 742ab2df1df7cadecb75d22b9e63a2225e4d560f
Author: Markus Thoemmes 
AuthorDate: Wed Jul 19 08:35:50 2017 +0200

Remove ContainerPool test which is no longer applicable.

This test used to test a situation of self-recovery in case the 
ActivationFeed sends more messages than the ContainerPool can consume. This has 
been deemed an error situation and would be a bug in the ActivationFeed, hence 
the log is flooded with error messages.
---
 .../core/containerpool/test/ContainerPoolTests.scala| 17 -
 1 file changed, 17 deletions(-)

diff --git 
a/tests/src/test/scala/whisk/core/containerpool/test/ContainerPoolTests.scala 
b/tests/src/test/scala/whisk/core/containerpool/test/ContainerPoolTests.scala
index 5202ef8..198 100644
--- 
a/tests/src/test/scala/whisk/core/containerpool/test/ContainerPoolTests.scala
+++ 
b/tests/src/test/scala/whisk/core/containerpool/test/ContainerPoolTests.scala
@@ -195,23 +195,6 @@ class ContainerPoolTests extends 
TestKit(ActorSystem("ContainerPool"))
 containers(1).expectMsg(runMessageDifferentNamespace)
 }
 
-it should "not remove a container to make space in the pool if it is 
already full and the same action + same invocation namespace arrives" in 
within(timeout) {
-val (containers, factory) = testContainers(2)
-val feed = TestProbe()
-
-// a pool with only 1 slot
-val pool = system.actorOf(ContainerPool.props(factory, 1, 1, feed.ref))
-pool ! runMessage
-containers(0).expectMsg(runMessage)
-containers(0).send(pool, NeedWork(warmedData()))
-feed.expectMsg(MessageFeed.Processed)
-pool ! runMessage
-containers(0).expectMsg(runMessage)
-pool ! runMessage //expect this message to be requeued since previous 
is incomplete.
-containers(0).expectNoMsg(100.milliseconds)
-containers(1).expectNoMsg(100.milliseconds)
-}
-
 /*
  * CONTAINER PREWARMING
  */

-- 
To stop receiving notification emails like this one, please contact
['"commits@openwhisk.apache.org" '].


[incubator-openwhisk] branch master updated: Escape all values when deploying invoker (#2550)

2017-08-01 Thread cbickel
This is an automated email from the ASF dual-hosted git repository.

cbickel pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-openwhisk.git


The following commit(s) were added to refs/heads/master by this push:
 new 9e55e5b  Escape all values when deploying invoker (#2550)
9e55e5b is described below

commit 9e55e5b99064066c6e9c4142a7cee04cb5446cbe
Author: Markus Thömmes 
AuthorDate: Tue Aug 1 11:36:32 2017 +0200

Escape all values when deploying invoker (#2550)
---
 ansible/roles/invoker/tasks/deploy.yml | 60 +-
 1 file changed, 30 insertions(+), 30 deletions(-)

diff --git a/ansible/roles/invoker/tasks/deploy.yml 
b/ansible/roles/invoker/tasks/deploy.yml
index 5c67aa2..3fc828a 100644
--- a/ansible/roles/invoker/tasks/deploy.yml
+++ b/ansible/roles/invoker/tasks/deploy.yml
@@ -66,38 +66,38 @@
 --name invoker{{ groups['invokers'].index(inventory_hostname) }}
 --hostname invoker{{ groups['invokers'].index(inventory_hostname) }}
 --restart {{ docker.restart.policy }}
--e JAVA_OPTS=-Xmx{{ invoker.heap }}
+-e JAVA_OPTS='-Xmx{{ invoker.heap }}'
 -e INVOKER_OPTS='{{ invoker.arguments }}'
--e COMPONENT_NAME=invoker{{ 
groups['invokers'].index(inventory_hostname) }}
--e PORT=8080
--e KAFKA_HOST={{ groups['kafka']|first }}
--e KAFKA_HOST_PORT={{ kafka.port }}
--e DB_PROTOCOL={{ db_protocol }}
--e DB_PROVIDER={{ db_provider }}
--e DB_HOST={{ db_host }}
--e DB_PORT={{ db_port }}
--e DB_USERNAME={{ db_username }}
--e DB_PASSWORD={{ db_password }}
--e DB_WHISK_ACTIONS={{ db.whisk.actions }}
--e DB_WHISK_ACTIVATIONS={{ db.whisk.activations }}
--e WHISK_API_HOST_PROTO={{ whisk_api_host_proto | default('https') }}
--e WHISK_API_HOST_PORT={{ whisk_api_host_port | default('443') }}
--e WHISK_API_HOST_NAME={{ whisk_api_host_name | default(groups['edge'] 
| first) }}
+-e COMPONENT_NAME='invoker{{ 
groups['invokers'].index(inventory_hostname) }}'
+-e PORT='8080'
+-e KAFKA_HOST='{{ groups['kafka']|first }}'
+-e KAFKA_HOST_PORT='{{ kafka.port }}'
+-e DB_PROTOCOL='{{ db_protocol }}'
+-e DB_PROVIDER='{{ db_provider }}'
+-e DB_HOST='{{ db_host }}'
+-e DB_PORT='{{ db_port }}'
+-e DB_USERNAME='{{ db_username }}'
+-e DB_PASSWORD='{{ db_password }}'
+-e DB_WHISK_ACTIONS='{{ db.whisk.actions }}'
+-e DB_WHISK_ACTIVATIONS='{{ db.whisk.activations }}'
+-e WHISK_API_HOST_PROTO='{{ whisk_api_host_proto | default('https') }}'
+-e WHISK_API_HOST_PORT='{{ whisk_api_host_port | default('443') }}'
+-e WHISK_API_HOST_NAME='{{ whisk_api_host_name | 
default(groups['edge'] | first) }}'
 -e RUNTIMES_MANIFEST='{{ runtimesManifest | to_json }}'
--e SELF_DOCKER_ENDPOINT=localhost
--e DOCKER_REGISTRY={{ docker_registry }}
--e DOCKER_IMAGE_PREFIX={{ docker.image.prefix }}
--e DOCKER_IMAGE_TAG={{ docker.image.tag }}
--e INVOKER_CONTAINER_NETWORK={{ invoker_container_network_name | 
default("bridge") }}
--e INVOKER_CONTAINER_POLICY={{ invoker_container_policy_name | 
default()}}
--e INVOKER_CONTAINER_DNS={{ invoker_container_network_dns_servers | 
default()}}
--e INVOKER_NUMCORE={{ invoker.numcore }}
--e INVOKER_CORESHARE={{ invoker.coreshare }}
--e INVOKER_SERIALIZEDOCKEROP={{ invoker.serializeDockerOp }}
--e INVOKER_SERIALIZEDOCKERPULL={{ invoker.serializeDockerPull }}
--e INVOKER_USERUNC={{ invoker_use_runc | default(invoker.useRunc) }}
--e INVOKER_USEREACTIVEPOOL={{ invoker.useReactivePool }}
--e WHISK_LOGS_DIR={{ whisk_logs_dir }}
+-e SELF_DOCKER_ENDPOINT='localhost'
+-e DOCKER_REGISTRY='{{ docker_registry }}'
+-e DOCKER_IMAGE_PREFIX='{{ docker.image.prefix }}'
+-e DOCKER_IMAGE_TAG='{{ docker.image.tag }}'
+-e INVOKER_CONTAINER_NETWORK='{{ invoker_container_network_name | 
default("bridge") }}'
+-e INVOKER_CONTAINER_POLICY='{{ invoker_container_policy_name | 
default()}}'
+-e INVOKER_CONTAINER_DNS='{{ invoker_container_network_dns_servers | 
default()}}'
+-e INVOKER_NUMCORE='{{ invoker.numcore }}'
+-e INVOKER_CORESHARE='{{ invoker.coreshare }}'
+-e INVOKER_SERIALIZEDOCKEROP='{{ invoker.serializeDockerOp }}'
+-e INVOKER_SERIALIZEDOCKERPULL='{{ invoker.serializeDo

[incubator-openwhisk] branch master updated: Temporarily use published swift3Action. (#2580)

2017-08-07 Thread cbickel
This is an automated email from the ASF dual-hosted git repository.

cbickel pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-openwhisk.git


The following commit(s) were added to refs/heads/master by this push:
 new 49ac756  Temporarily use published swift3Action. (#2580)
49ac756 is described below

commit 49ac756064905da0bcba8ed9978a3e29dd8c04dc
Author: Markus Thömmes 
AuthorDate: Mon Aug 7 14:00:50 2017 +0200

Temporarily use published swift3Action. (#2580)

The download of Swift 3.0.2 on swift.org is broken and will only be revived 
in a couple of days. This effectively freezes the swift3Action image to the 
latest published version on Dockerhub. There are no changes imminent anyway.
---
 core/swift3Action/Dockerfile | 69 +---
 1 file changed, 1 insertion(+), 68 deletions(-)

diff --git a/core/swift3Action/Dockerfile b/core/swift3Action/Dockerfile
index aa766d4..1324400 100644
--- a/core/swift3Action/Dockerfile
+++ b/core/swift3Action/Dockerfile
@@ -1,68 +1 @@
-# Dockerfile for swift actions, overrides and extends ActionRunner from 
actionProxy
-# This Dockerfile is partially based on: 
https://github.com/swiftdocker/docker-swift/
-FROM buildpack-deps:trusty
-
-ENV DEBIAN_FRONTEND noninteractive
-
-# Upgrade and install basic Python dependencies
-RUN apt-get -y purge \
- && apt-get -y update \
- && apt-get -y install --fix-missing python2.7 python-gevent python-flask \
-\
-# Upgrade and install Swift dependencies
- && apt-get -y install --fix-missing build-essential curl wget libicu-dev \
-\
-# Install zip for compiling Swift actions
- && apt-get -y install zip \
-\
-# Clean up
- && apt-get clean
-
-# Install clang manually, since SPM wants at least Clang 3-6
-RUN cd / &&\
-(curl -L -k 
http://llvm.org/releases/3.6.2/clang+llvm-3.6.2-x86_64-linux-gnu-ubuntu-14.04.tar.xz
 | tar xJ) &&\
-cp -r /clang+llvm-3.6.2-x86_64-linux-gnu-ubuntu-14.04/* /usr/ &&\
-rm -rf /clang+llvm-3.6.2-x86_64-linux-gnu-ubuntu-14.04
-
-RUN update-alternatives --install /usr/bin/g++ g++ /usr/bin/clang++ 20
-RUN update-alternatives --install /usr/bin/gcc gcc /usr/bin/clang 20
-
-# Install Swift keys
-RUN wget --no-verbose -O - https://swift.org/keys/all-keys.asc | gpg --import 
- && \
-gpg --keyserver hkp://pool.sks-keyservers.net --refresh-keys Swift
-
-# Install Swift Ubuntu 14.04 Snapshot
-#https://swift.org/builds/swift-3.0.1-release/ubuntu1404/swift-3.0.1-RELEASE/swift-3.0.1-RELEASE-ubuntu14.04.tar.gz
-
-ENV SWIFT_VERSION 3.0.2
-ENV SWIFT_RELEASE_TYPE RELEASE
-ENV SWIFT_PLATFORM ubuntu14.04
-
-RUN 
SWIFT_ARCHIVE_NAME=swift-$SWIFT_VERSION-$SWIFT_RELEASE_TYPE-$SWIFT_PLATFORM && \
-SWIFT_URL=https://swift.org/builds/swift-$SWIFT_VERSION-$(echo 
"$SWIFT_RELEASE_TYPE" | tr '[:upper:]' '[:lower:]')/$(echo "$SWIFT_PLATFORM" | 
tr -d .)/swift-$SWIFT_VERSION-$SWIFT_RELEASE_TYPE/$SWIFT_ARCHIVE_NAME.tar.gz && 
\
-echo $SWIFT_URL && \
-wget --no-verbose $SWIFT_URL && \
-wget --no-verbose $SWIFT_URL.sig && \
-gpg --verify $SWIFT_ARCHIVE_NAME.tar.gz.sig && \
-tar -xzf $SWIFT_ARCHIVE_NAME.tar.gz --directory / --strip-components=1 && \
-rm -rf $SWIFT_ARCHIVE_NAME* /tmp/* /var/tmp/*
-
-# Add the action proxy
-RUN mkdir -p /actionProxy
-ADD actionproxy.py /actionProxy
-
-# Add files needed to build and run action
-RUN mkdir -p /swift3Action
-ADD epilogue.swift /swift3Action
-ADD buildandrecord.py /swift3Action
-ADD swift3runner.py /swift3Action
-ADD spm-build /swift3Action/spm-build
-
-
-# Build kitura net
-RUN touch /swift3Action/spm-build/main.swift
-RUN python /swift3Action/buildandrecord.py && rm 
/swift3Action/spm-build/.build/release/Action
-#RUN cd /swift3Action/spm-build; swift build -c release; rm 
/swift3Action/spm-build/.build/release/Action
-ENV FLASK_PROXY_PORT 8080
-
-CMD ["/bin/bash", "-c", "cd /swift3Action && PYTHONIOENCODING='utf-8' python 
-u swift3runner.py"]
\ No newline at end of file
+FROM openwhisk/swift3action
\ No newline at end of file

-- 
To stop receiving notification emails like this one, please contact
['"commits@openwhisk.apache.org" '].


[incubator-openwhisk] branch master updated: Add JMX arguments to docker-machine again. (#2581)

2017-08-07 Thread cbickel
This is an automated email from the ASF dual-hosted git repository.

cbickel pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-openwhisk.git


The following commit(s) were added to refs/heads/master by this push:
 new 8be4cdf  Add JMX arguments to docker-machine again. (#2581)
8be4cdf is described below

commit 8be4cdf6b2d3de67f46b5a1fb9e5911a62dca8f7
Author: Markus Thömmes 
AuthorDate: Mon Aug 7 15:40:37 2017 +0200

Add JMX arguments to docker-machine again. (#2581)
---
 ansible/environments/docker-machine/group_vars/all | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/ansible/environments/docker-machine/group_vars/all 
b/ansible/environments/docker-machine/group_vars/all
index 82a3e11..1ce7bb2 100644
--- a/ansible/environments/docker-machine/group_vars/all
+++ b/ansible/environments/docker-machine/group_vars/all
@@ -38,3 +38,6 @@ apigw_host_v2: "http://{{ groups['apigateway']|first 
}}:{{apigateway.port.api}}/
 
 # RunC enablement
 invoker_use_runc: true
+
+controller_arguments: '-Dcom.sun.management.jmxremote 
-Dcom.sun.management.jmxremote.ssl=false 
-Dcom.sun.management.jmxremote.authenticate=false 
-Dcom.sun.management.jmxremote.port=1098'
+invoker_arguments: "{{ controller_arguments }}"
\ No newline at end of file

-- 
To stop receiving notification emails like this one, please contact
['"commits@openwhisk.apache.org" '].


[incubator-openwhisk] branch master updated: Remove spray-caching dependency. (#2628)

2017-08-18 Thread cbickel
This is an automated email from the ASF dual-hosted git repository.

cbickel pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-openwhisk.git


The following commit(s) were added to refs/heads/master by this push:
 new 68c5e35  Remove spray-caching dependency. (#2628)
68c5e35 is described below

commit 68c5e35a25b6dbdbf8a58f24384204b5a57edce6
Author: Markus Thömmes 
AuthorDate: Fri Aug 18 10:47:07 2017 +0200

Remove spray-caching dependency. (#2628)

Spray-caching is the caching module of the spray-http 
server, which we no longer use. We already have our own cache implementation anyway, and keeping 
spray-caching binds us to Scala 2.11.
---
 common/scala/build.gradle  |  1 -
 .../scala/whisk/core/database/InMemoryCache.scala  | 73 --
 .../MultipleReadersSingleWriterCache.scala | 46 +-
 3 files changed, 29 insertions(+), 91 deletions(-)

diff --git a/common/scala/build.gradle b/common/scala/build.gradle
index e1d0535..2dc406a 100644
--- a/common/scala/build.gradle
+++ b/common/scala/build.gradle
@@ -11,7 +11,6 @@ repositories {
 dependencies {
 compile "org.scala-lang:scala-library:${gradle.scala.version}"
 
-compile 'io.spray:spray-caching_2.11:1.3.4'
 compile 'io.spray:spray-json_2.11:1.3.3'
 
 compile 'com.typesafe.akka:akka-actor_2.11:2.4.16'
diff --git 
a/common/scala/src/main/scala/whisk/core/database/InMemoryCache.scala 
b/common/scala/src/main/scala/whisk/core/database/InMemoryCache.scala
deleted file mode 100644
index fd2b3c9..000
--- a/common/scala/src/main/scala/whisk/core/database/InMemoryCache.scala
+++ /dev/null
@@ -1,73 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package whisk.core.database
-
-import scala.concurrent.Future
-import spray.caching.Cache
-import spray.caching.LruCache
-import whisk.common.Logging
-import whisk.common.TransactionId
-import scala.concurrent.ExecutionContext
-import whisk.common.LoggingMarkers
-
-trait InMemoryCache[W] {
-
-/** Toggle to enable/disable caching. */
-protected def cacheEnabled = false
-
-protected def cacheKeys(w: W): Set[Any] = Set(w)
-
-protected def cacheInvalidate(keys: Set[Any])(
-implicit transid: TransactionId, logger: Logging): Unit = {
-if (cacheEnabled) {
-logger.info(this, s"invalidating $keys")
-keys foreach { k => cache remove k }
-}
-}
-
-protected def cacheLookup[Wsuper >: W](
-datastore: ArtifactStore[Wsuper],
-key: Any,
-future: => Future[W],
-fromCache: Boolean = cacheEnabled)(
-implicit transid: TransactionId, logger: Logging) = {
-if (fromCache) {
-implicit val ec = datastore.executionContext
-cache.get(key) map { v =>
-transid.mark(this, LoggingMarkers.DATABASE_CACHE_HIT, s"[GET] 
serving from cache: $key")(logger)
-v
-} getOrElse {
-transid.mark(this, LoggingMarkers.DATABASE_CACHE_MISS, s"[GET] 
serving from datastore: $key")(logger)
-future flatMap {
-// cache result of future iff it was successful
-cache(key)(_)
-}
-}
-} else future
-}
-
-protected def cacheUpdate(keys: Set[Any], w: W)(
-implicit transid: TransactionId, logger: Logging, ec: 
ExecutionContext) = {
-if (cacheEnabled) {
-logger.info(this, s"caching $keys")
-keys foreach { cache(_) { w } }
-}
-}
-
-private val cache: Cache[W] = LruCache()
-}
diff --git 
a/common/scala/src/main/scala/whisk/core/database/MultipleReadersSingleWriterCache.scala
 
b/common/scala/src/main/scala/whisk/core/database/MultipleReadersSingleWriterCache.scala
index 79f2ac8..d824948 100644
--- 
a/common/scala/src/main/scala/whisk/core/database/MultipleReadersSingleWriterCache.scala
+++ 
b/common/scala/src/main/scala/whisk/core/database/MultipleReadersSingleWriterCache.scala
@@ -15,6 +15,11 @@
  * limi

[incubator-openwhisk] branch master updated: Bump toStrictEntity timeout to 30 seconds. (#2644)

2017-08-21 Thread cbickel
This is an automated email from the ASF dual-hosted git repository.

cbickel pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-openwhisk.git


The following commit(s) were added to refs/heads/master by this push:
 new ed0f741  Bump toStrictEntity timeout to 30 seconds. (#2644)
ed0f741 is described below

commit ed0f7413cdbace41fba796deb7ef05cfe1ef2ac1
Author: Markus Thömmes 
AuthorDate: Mon Aug 21 14:36:07 2017 +0200

Bump toStrictEntity timeout to 30 seconds. (#2644)

1 second is a rather small timeout for `toStrictEntity`. If a garbage 
collection hits around it, requests might randomly and unnecessarily fail. 
Bumping that timeout to something quite high shouldn't impact general 
operations at all.
---
 common/scala/src/main/scala/whisk/http/BasicHttpService.scala | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/common/scala/src/main/scala/whisk/http/BasicHttpService.scala 
b/common/scala/src/main/scala/whisk/http/BasicHttpService.scala
index 1df098e..0c92146 100644
--- a/common/scala/src/main/scala/whisk/http/BasicHttpService.scala
+++ b/common/scala/src/main/scala/whisk/http/BasicHttpService.scala
@@ -92,7 +92,7 @@ trait BasicHttpService extends Directives with 
TransactionCounter {
 prioritizeRejections {
 DebuggingDirectives.logRequest(logRequestInfo _) {
 DebuggingDirectives.logRequestResult(logResponseInfo 
_) {
-toStrictEntity(1.second) {
+toStrictEntity(30.seconds) {
 routes
 }
 }

-- 
To stop receiving notification emails like this one, please contact
['"commits@openwhisk.apache.org" '].


[incubator-openwhisk] branch master updated: Readd BasicHttpService log line (#2654)

2017-08-22 Thread cbickel
This is an automated email from the ASF dual-hosted git repository.

cbickel pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-openwhisk.git


The following commit(s) were added to refs/heads/master by this push:
 new 0ba4418  Readd BasicHttpService log line (#2654)
0ba4418 is described below

commit 0ba4418c0bef83a9a04fdf1dfbfc23079fe22cd8
Author: Markus Thömmes 
AuthorDate: Tue Aug 22 10:27:35 2017 +0200

Readd BasicHttpService log line (#2654)
---
 .../main/scala/whisk/http/BasicHttpService.scala   | 24 +-
 1 file changed, 10 insertions(+), 14 deletions(-)

diff --git a/common/scala/src/main/scala/whisk/http/BasicHttpService.scala 
b/common/scala/src/main/scala/whisk/http/BasicHttpService.scala
index 0c92146..502d0e9 100644
--- a/common/scala/src/main/scala/whisk/http/BasicHttpService.scala
+++ b/common/scala/src/main/scala/whisk/http/BasicHttpService.scala
@@ -20,17 +20,13 @@ package whisk.http
 import scala.collection.immutable.Seq
 import scala.concurrent.Await
 import scala.concurrent.duration.DurationInt
-
 import akka.actor.ActorSystem
 import akka.event.Logging
 import akka.http.scaladsl.Http
 import akka.http.scaladsl.model._
 import akka.http.scaladsl.model.HttpRequest
-import akka.http.scaladsl.server.Directives
-import akka.http.scaladsl.server.RejectionHandler
-import akka.http.scaladsl.server.Route
+import akka.http.scaladsl.server._
 import akka.http.scaladsl.server.RouteResult.Rejected
-import akka.http.scaladsl.server.UnacceptedResponseContentTypeRejection
 import akka.http.scaladsl.server.directives.DebuggingDirectives
 import akka.http.scaladsl.server.directives.LogEntry
 import akka.stream.ActorMaterializer
@@ -68,7 +64,7 @@ trait BasicHttpService extends Directives with 
TransactionCounter {
  * Gets the log level for a given route. The default is
  * InfoLevel so override as needed.
  *
- * @param the route
+ * @param route the route to determine the loglevel for
  * @return a log level for the route
  */
 def loglevelForRoute(route: String): Logging.LogLevel = Logging.InfoLevel
@@ -88,10 +84,10 @@ trait BasicHttpService extends Directives with 
TransactionCounter {
  */
 def route: Route = {
 assignId { implicit transid =>
-handleRejections(customRejectionHandler) {
-prioritizeRejections {
-DebuggingDirectives.logRequest(logRequestInfo _) {
-DebuggingDirectives.logRequestResult(logResponseInfo 
_) {
+DebuggingDirectives.logRequest(logRequestInfo _) {
+DebuggingDirectives.logRequestResult(logResponseInfo _) {
+handleRejections(customRejectionHandler) {
+prioritizeRejections {
 toStrictEntity(30.seconds) {
 routes
 }
@@ -107,16 +103,16 @@ trait BasicHttpService extends Directives with 
TransactionCounter {
 
 /** Generates log entry for every request. */
 protected def logRequestInfo(req: HttpRequest)(implicit tid: 
TransactionId): LogEntry = {
-val m = req.method.name.toString
+val m = req.method.name
 val p = req.uri.path.toString
 val q = req.uri.query().toString
 val l = loglevelForRoute(p)
 LogEntry(s"[$tid] $m $p $q", l)
 }
 
-protected def logResponseInfo(req: HttpRequest)(implicit tid: 
TransactionId): Any => Option[LogEntry] = {
-case res: HttpResponse =>
-val m = req.method.toString
+protected def logResponseInfo(req: HttpRequest)(implicit tid: 
TransactionId): RouteResult => Option[LogEntry] = {
+case RouteResult.Complete(res: HttpResponse) =>
+val m = req.method.name
 val p = req.uri.path.toString
 val l = loglevelForRoute(p)
 

-- 
To stop receiving notification emails like this one, please contact
['"commits@openwhisk.apache.org" '].


[incubator-openwhisk] branch master updated: Apply prereqs serially to prevent installation conflicts. (#2664)

2017-08-23 Thread cbickel
This is an automated email from the ASF dual-hosted git repository.

cbickel pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-openwhisk.git


The following commit(s) were added to refs/heads/master by this push:
 new 22799dc  Apply prereqs serially to prevent installation conflicts. 
(#2664)
22799dc is described below

commit 22799dcde3558ff9f7a213d496ec7dc9f57a86b3
Author: Markus Thömmes 
AuthorDate: Thu Aug 24 07:51:27 2017 +0200

Apply prereqs serially to prevent installation conflicts. (#2664)

Using the same host for different "machines" seems to cause trouble when 
trying to concurrently install packages.
---
 ansible/prereq.yml | 1 +
 1 file changed, 1 insertion(+)

diff --git a/ansible/prereq.yml b/ansible/prereq.yml
index 3b6b1a0..aeeb73c 100644
--- a/ansible/prereq.yml
+++ b/ansible/prereq.yml
@@ -3,5 +3,6 @@
 # It will install all necessary packages to run Openwhisk playbooks.  
 
 - hosts: all:!ansible
+  serial: 1
   roles:
   - prereq

-- 
To stop receiving notification emails like this one, please contact
['"commits@openwhisk.apache.org" '].


[incubator-openwhisk] branch master updated: Wait for view in AuthenticateTests. (#2694)

2017-09-05 Thread cbickel
This is an automated email from the ASF dual-hosted git repository.

cbickel pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-openwhisk.git


The following commit(s) were added to refs/heads/master by this push:
 new c50d9be  Wait for view in AuthenticateTests. (#2694)
c50d9be is described below

commit c50d9bec5e86a33911e7867de82a03e9462d54bd
Author: Markus Thömmes 
AuthorDate: Tue Sep 5 13:58:12 2017 +0200

Wait for view in AuthenticateTests. (#2694)
---
 tests/src/test/scala/whisk/core/controller/test/AuthenticateTests.scala | 1 +
 1 file changed, 1 insertion(+)

diff --git 
a/tests/src/test/scala/whisk/core/controller/test/AuthenticateTests.scala 
b/tests/src/test/scala/whisk/core/controller/test/AuthenticateTests.scala
index 2c6415b..ad4789d 100644
--- a/tests/src/test/scala/whisk/core/controller/test/AuthenticateTests.scala
+++ b/tests/src/test/scala/whisk/core/controller/test/AuthenticateTests.scala
@@ -57,6 +57,7 @@ class AuthenticateTests extends ControllerTestCommon with 
Authenticate {
 // Try to login with each specific namespace
 namespaces.foreach { ns =>
 println(s"Trying to login to $ns")
+waitOnView(authStore, ns.authkey, 1) // wait for the view to be 
updated
 val pass = BasicHttpCredentials(ns.authkey.uuid.asString, 
ns.authkey.key.asString)
 val user = Await.result(validateCredentials(Some(pass)), 
dbOpTimeout)
 user.get shouldBe Identity(subject, ns.name, ns.authkey, 
Privilege.ALL)

-- 
To stop receiving notification emails like this one, please contact
['"commits@openwhisk.apache.org" '].


[incubator-openwhisk] branch master updated: Bump replication timeout, abort test with correct message (#2696)

2017-09-05 Thread cbickel
This is an automated email from the ASF dual-hosted git repository.

cbickel pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-openwhisk.git


The following commit(s) were added to refs/heads/master by this push:
 new 3ebc67e  Bump replication timeout, abort test with correct message 
(#2696)
3ebc67e is described below

commit 3ebc67e5641ff44653015cd9c074ed6aaf98ee37
Author: Markus Thömmes 
AuthorDate: Tue Sep 5 14:00:14 2017 +0200

Bump replication timeout, abort test with correct message (#2696)
---
 .../src/test/scala/whisk/core/database/test/ReplicatorTests.scala  | 7 +--
 1 file changed, 5 insertions(+), 2 deletions(-)

diff --git 
a/tests/src/test/scala/whisk/core/database/test/ReplicatorTests.scala 
b/tests/src/test/scala/whisk/core/database/test/ReplicatorTests.scala
index a3d7e02..0090b0e 100644
--- a/tests/src/test/scala/whisk/core/database/test/ReplicatorTests.scala
+++ b/tests/src/test/scala/whisk/core/database/test/ReplicatorTests.scala
@@ -104,7 +104,8 @@ class ReplicatorTests extends FlatSpec
 
 /** Wait for a replication to finish */
 def waitForReplication(dbName: String) = {
-waitfor(() => {
+val timeout = 5.minutes
+val replicationResult = waitfor(() => {
 val replicatorDoc = replicatorClient.getDoc(dbName).futureValue
 replicatorDoc shouldBe 'right
 
@@ -112,7 +113,9 @@ class ReplicatorTests extends FlatSpec
 println(s"Waiting for replication, state: $state")
 
 state.contains("completed".toJson)
-}, totalWait = 2.minutes)
+}, totalWait = timeout)
+
+assert(replicationResult, s"replication did not finish in $timeout")
 }
 
 /** Compares two databases for full equality */

-- 
To stop receiving notification emails like this one, please contact
['"commits@openwhisk.apache.org" '].


[incubator-openwhisk] branch master updated: Fix false assertion, refactor CacheInvalidationTests (#2698)

2017-09-05 Thread cbickel
This is an automated email from the ASF dual-hosted git repository.

cbickel pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-openwhisk.git


The following commit(s) were added to refs/heads/master by this push:
 new ff43578  Fix false assertion, refactor CacheInvalidationTests (#2698)
ff43578 is described below

commit ff43578511c6f96311065ce7b90a1a1b1863ae2e
Author: Markus Thömmes 
AuthorDate: Wed Sep 6 08:06:00 2017 +0200

Fix false assertion, refactor CacheInvalidationTests (#2698)
---
 .../src/test/scala/ha/CacheInvalidationTests.scala | 70 +-
 1 file changed, 28 insertions(+), 42 deletions(-)

diff --git a/tests/src/test/scala/ha/CacheInvalidationTests.scala 
b/tests/src/test/scala/ha/CacheInvalidationTests.scala
index ecda623..5698ee3 100644
--- a/tests/src/test/scala/ha/CacheInvalidationTests.scala
+++ b/tests/src/test/scala/ha/CacheInvalidationTests.scala
@@ -28,11 +28,7 @@ import org.scalatest.junit.JUnitRunner
 import akka.http.scaladsl.Http
 import akka.http.scaladsl.marshallers.sprayjson.SprayJsonSupport._
 import akka.http.scaladsl.marshalling.Marshal
-import akka.http.scaladsl.model.HttpMethods
-import akka.http.scaladsl.model.HttpRequest
-import akka.http.scaladsl.model.RequestEntity
-import akka.http.scaladsl.model.StatusCodes
-import akka.http.scaladsl.model.Uri
+import akka.http.scaladsl.model._
 import akka.http.scaladsl.model.headers.Authorization
 import akka.http.scaladsl.model.headers.BasicHttpCredentials
 import akka.http.scaladsl.unmarshalling.Unmarshal
@@ -43,7 +39,6 @@ import common.WskTestHelpers
 import spray.json._
 import spray.json.DefaultJsonProtocol._
 import whisk.core.WhiskConfig
-import akka.http.scaladsl.model.StatusCode
 
 @RunWith(classOf[JUnitRunner])
 class CacheInvalidationTests
@@ -55,31 +50,34 @@ class CacheInvalidationTests
 implicit val materializer = ActorMaterializer()
 
 val hosts = WhiskProperties.getProperty("controller.hosts").split(",")
-val authKey = 
WhiskProperties.readAuthKey(WhiskProperties.getAuthFileForTesting)
+def ports(instance: Int) = WhiskProperties.getControllerBasePort + instance
+
+def controllerUri(instance: Int) = {
+require(instance >= 0 && instance < hosts.length, "Controller instance 
not known.")
+
Uri().withScheme("http").withHost(hosts(instance)).withPort(ports(instance))
+}
+def actionPath(name: String) = 
Uri.Path(s"/api/v1/namespaces/_/actions/$name")
+
+val Array(username, password) = 
WhiskProperties.readAuthKey(WhiskProperties.getAuthFileForTesting).split(":")
+val authHeader = Authorization(BasicHttpCredentials(username, password))
 
 val timeout = 15.seconds
 
 def retry[T](fn: => T) = whisk.utils.retry(fn, 15, Some(1.second))
 
 def updateAction(name: String, code: String, controllerInstance: Int = 0) 
= {
-require(controllerInstance >= 0 && controllerInstance < hosts.length, 
"Controller instance not known.")
-
-val host = hosts(controllerInstance)
-val port = WhiskProperties.getControllerBasePort + controllerInstance
-
 val body = JsObject("namespace" -> JsString("_"), "name" -> 
JsString(name), "exec" -> JsObject("kind" -> JsString("nodejs:default"), "code" 
-> JsString(code)))
 
 val request = Marshal(body).to[RequestEntity].flatMap { entity =>
 Http().singleRequest(HttpRequest(
 method = HttpMethods.PUT,
-uri = 
Uri().withScheme("http").withHost(host).withPort(port).withPath(Uri.Path(s"/api/v1/namespaces/_/actions/$name")).withQuery(Uri.Query("overwrite"
 -> true.toString)),
-headers = 
List(Authorization(BasicHttpCredentials(authKey.split(":")(0), 
authKey.split(":")(1,
+uri = 
controllerUri(controllerInstance).withPath(actionPath(name)).withQuery(Uri.Query("overwrite"
 -> true.toString)),
+headers = List(authHeader),
 entity = entity)).flatMap { response =>
-val action = Unmarshal(response).to[JsObject].map { resBody =>
+Unmarshal(response).to[JsObject].map { resBody =>
 withClue(s"Error in Body: $resBody")(response.status 
shouldBe StatusCodes.OK)
 resBody
 }
-action
 }
 }
 
@@ -87,51 +85,40 @@ class CacheInvalidationTests
 }
 
 def getAction(name: String, controllerInstance: Int = 0, expectedCode: 
StatusCode = StatusCodes.OK) = {
-require(controllerInstance >= 0 && controllerInstance < hosts.length, 
"Controller instance not known.")
-
-val host = hosts(controllerInstance)
-   

[incubator-openwhisk] branch master updated: Remove redundant view which predates db split into activations and assets. (#2735)

2017-09-20 Thread cbickel
This is an automated email from the ASF dual-hosted git repository.

cbickel pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-openwhisk.git


The following commit(s) were added to refs/heads/master by this push:
 new eb10667  Remove redundant view which predates db split into 
activations and assets. (#2735)
eb10667 is described below

commit eb1066772489f6dd23787074607f51d04b1cad5a
Author: rodric rabbah 
AuthorDate: Wed Sep 20 08:37:31 2017 -0400

Remove redundant view which predates db split into activations and assets. 
(#2735)

* Remove references to view that is no longer used.

It predates db split into activations and assets.
Also remove support for listing public packages, which has been forcibly 
disabled.
This should be redesigned as a separate discover API.
Normalize post-processing of list operations using Either.fold.
Refactor query methods and remove some deadcode.

* Rename aname to aname() because it's side-effecting.

* Remove references to 'all' view for activations db.
---
 .../scala/whisk/core/entity/WhiskActivation.scala  |  33 ++-
 .../main/scala/whisk/core/entity/WhiskStore.scala  | 178 +++---
 .../main/scala/whisk/core/controller/Actions.scala |   4 +-
 .../scala/whisk/core/controller/Activations.scala  |   8 +-
 .../scala/whisk/core/controller/ApiUtils.scala |   6 +-
 .../scala/whisk/core/controller/Namespaces.scala   |   7 +-
 .../scala/whisk/core/controller/Packages.scala |  46 ++--
 .../main/scala/whisk/core/controller/Rules.scala   |   4 +-
 .../scala/whisk/core/controller/Triggers.scala |   4 +-
 .../core/controller/test/ActionsApiTests.scala |  74 +++---
 .../core/controller/test/ActivationsApiTests.scala |  32 +--
 .../controller/test/PackageActionsApiTests.scala   | 196 +++
 .../core/controller/test/TriggersApiTests.scala|  52 ++--
 .../SequenceActionApiMigrationTests.scala  |  16 +-
 .../scala/whisk/core/database/test/DbUtils.scala   |  38 +--
 .../scala/whisk/core/entity/test/ViewTests.scala   | 273 +++--
 16 files changed, 422 insertions(+), 549 deletions(-)

diff --git 
a/common/scala/src/main/scala/whisk/core/entity/WhiskActivation.scala 
b/common/scala/src/main/scala/whisk/core/entity/WhiskActivation.scala
index 60af0fd..3dca9c1 100644
--- a/common/scala/src/main/scala/whisk/core/entity/WhiskActivation.scala
+++ b/common/scala/src/main/scala/whisk/core/entity/WhiskActivation.scala
@@ -19,12 +19,16 @@ package whisk.core.entity
 
 import java.time.Instant
 
+import scala.concurrent.Future
 import scala.util.Try
 
+import spray.json._
 import spray.json.DefaultJsonProtocol
 import spray.json.DefaultJsonProtocol._
-import spray.json._
+import whisk.common.TransactionId
+import whisk.core.database.ArtifactStore
 import whisk.core.database.DocumentFactory
+import whisk.core.database.StaleParameter
 
 /**
  * A WhiskActivation provides an abstraction of the meta-data
@@ -44,7 +48,7 @@ import whisk.core.database.DocumentFactory
  * @param logs the activation logs
  * @param version the semantic version (usually matches the activated entity)
  * @param publish true to share the activation or false otherwise
- * @param annotation the set of annotations to attribute to the activation
+ * @param annotations the set of annotations to attribute to the activation
  * @param duration of the activation in milliseconds
  * @throws IllegalArgumentException if any required argument is undefined
  */
@@ -71,6 +75,7 @@ case class WhiskActivation(namespace: EntityPath,
 
   def toJson = WhiskActivation.serdes.write(this).asJsObject
 
+  /** This the activation summary as computed by the database view. Strictly 
used for testing. */
   override def summaryAsJson = {
 val JsObject(fields) = super.summaryAsJson
 JsObject(fields + ("activationId" -> activationId.toJson))
@@ -119,4 +124,28 @@ object WhiskActivation
   // Caching activations doesn't make much sense in the common case as usually,
   // an activation is only asked for once.
   override val cacheEnabled = false
+
+  /**
+   * Queries datastore for activation records which have an entity name 
matching the
+   * given parameter.
+   *
+   * @return list of records as JSON object if docs parameter is false, as Left
+   * and a list of the WhiskActivations if including the full record, 
as Right
+   */
+  def listActivationsMatchingName(db: ArtifactStore[WhiskActivation],
+  namespace: EntityPath,
+  name: EntityName,
+  skip: Int,
+  limit: Int,
+  docs: Boolean = false,
+  since: Option[Instant] = None,
+  upto: Option[Instant] = None,
+  stale: StaleParameter = StalePar

[incubator-openwhisk] branch master updated: Make logging hot paths lighter. (#2784)

2017-09-21 Thread cbickel
This is an automated email from the ASF dual-hosted git repository.

cbickel pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-openwhisk.git


The following commit(s) were added to refs/heads/master by this push:
 new 52f41d4  Make logging hot paths lighter. (#2784)
52f41d4 is described below

commit 52f41d4bab4a4c9045004e89b4708fe06d458396
Author: Markus Thömmes 
AuthorDate: Thu Sep 21 15:47:47 2017 +0200

Make logging hot paths lighter. (#2784)

Logging is **the** hotspot in our application today. This takes a first 
stab at making the overhead a bit smaller by respecting the set log-level as 
early as currently possible and not doing any extraneous throwaway computation.

It also reduces a bit of unnecessary boxing for the sake of just trying to 
keep a single space out of the message.
---
 common/scala/src/main/scala/whisk/common/Logging.scala   | 11 +++
 common/scala/src/main/scala/whisk/common/TransactionId.scala |  4 +---
 2 files changed, 4 insertions(+), 11 deletions(-)

diff --git a/common/scala/src/main/scala/whisk/common/Logging.scala 
b/common/scala/src/main/scala/whisk/common/Logging.scala
index 92c921a..b86a72d 100644
--- a/common/scala/src/main/scala/whisk/common/Logging.scala
+++ b/common/scala/src/main/scala/whisk/common/Logging.scala
@@ -85,15 +85,10 @@ trait Logging {
  */
 class AkkaLogging(loggingAdapter: LoggingAdapter) extends Logging {
   def emit(loglevel: LogLevel, id: TransactionId, from: AnyRef, message: 
String) = {
-val name = if (from.isInstanceOf[String]) from else 
Logging.getCleanSimpleClassName(from.getClass)
-
-val logMessage = Seq(message).collect {
-  case msg if msg.nonEmpty =>
-msg.split('\n').map(_.trim).mkString(" ")
+if (loggingAdapter.isEnabled(loglevel)) {
+  val name = if (from.isInstanceOf[String]) from else 
Logging.getCleanSimpleClassName(from.getClass)
+  loggingAdapter.log(loglevel, s"[$id] [$name] $message")
 }
-
-val parts = Seq(s"[$id]") ++ Seq(s"[$name]") ++ logMessage
-loggingAdapter.log(loglevel, parts.mkString(" "))
   }
 }
 
diff --git a/common/scala/src/main/scala/whisk/common/TransactionId.scala 
b/common/scala/src/main/scala/whisk/common/TransactionId.scala
index 0023b8b..1e04ce6 100644
--- a/common/scala/src/main/scala/whisk/common/TransactionId.scala
+++ b/common/scala/src/main/scala/whisk/common/TransactionId.scala
@@ -137,9 +137,7 @@ case class TransactionId private (meta: 
TransactionMetadata) extends AnyVal {
* @param message: The log message without the marker
* @param marker: The marker to add to the message
*/
-  private def createMessageWithMarker(message: String, marker: LogMarker): 
String = {
-(Option(message).filter(_.trim.nonEmpty) ++ Some(marker)).mkString(" ")
-  }
+  private def createMessageWithMarker(message: String, marker: LogMarker): 
String = s"$message $marker"
 }
 
 /**

-- 
To stop receiving notification emails like this one, please contact
['"commits@openwhisk.apache.org" '].


[incubator-openwhisk] branch master updated: Exclude sequences from activation log cleanup. (#2801)

2017-09-27 Thread cbickel
This is an automated email from the ASF dual-hosted git repository.

cbickel pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-openwhisk.git


The following commit(s) were added to refs/heads/master by this push:
 new 5ef17f1  Exclude sequences from activation log cleanup. (#2801)
5ef17f1 is described below

commit 5ef17f1e2744ad0c9a35d97a7323bbf76f5032f0
Author: rodric rabbah 
AuthorDate: Thu Sep 28 02:07:41 2017 -0400

Exclude sequences from activation log cleanup. (#2801)
---
 ansible/files/activations_design_document_for_activations_db.json | 2 +-
 ansible/files/logCleanup_design_document_for_activations_db.json  | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/ansible/files/activations_design_document_for_activations_db.json 
b/ansible/files/activations_design_document_for_activations_db.json
index f6a6240..565c1e9 100644
--- a/ansible/files/activations_design_document_for_activations_db.json
+++ b/ansible/files/activations_design_document_for_activations_db.json
@@ -2,7 +2,7 @@
   "_id": "_design/activations",
   "views": {
 "byDate": {
-  "map": "function (doc) {\n  var PATHSEP = \"/\";\n\n  var isActivation = 
function (doc) { return (doc.activationId !== undefined) };\n\n  if 
(isActivation(doc)) try {\nemit(doc.start, [doc._id, doc._rev])\n  } catch 
(e) {}\n}"
+  "map": "function (doc) {\n  if (doc.activationId !== undefined) try {\n  
  emit(doc.start, [doc._id, doc._rev]);\n  } catch (e) {}\n}"
 }
   },
   "language": "javascript"
diff --git a/ansible/files/logCleanup_design_document_for_activations_db.json 
b/ansible/files/logCleanup_design_document_for_activations_db.json
index b269823..9a97a5e 100644
--- a/ansible/files/logCleanup_design_document_for_activations_db.json
+++ b/ansible/files/logCleanup_design_document_for_activations_db.json
@@ -2,7 +2,7 @@
   "_id": "_design/logCleanup",
   "views": {
 "byDateWithLogs": {
-  "map": "function (doc) {\n  if (doc.activationId !== undefined && 
doc.logs && doc.logs.length > 0) {\nemit(doc.start, doc._id);\n  }\n}"
+  "map": "function (doc) {\n  if (doc.activationId !== undefined && 
doc.logs && doc.logs.length > 0) try {\nvar deleteLogs = true;\nfor (i 
= 0; i < doc.annotations.length; i++) {\n  var a = doc.annotations[i];\n
  if (a.key == \"kind\") {\ndeleteLogs = a.value != \"sequence\";\n 
   break;\n  }\n}\nif (deleteLogs) {\n  emit(doc.start, 
doc._id);\n}\n  } catch (e) {}\n}"
 }
   },
   "language": "javascript"

-- 
To stop receiving notification emails like this one, please contact
['"commits@openwhisk.apache.org" '].


[incubator-openwhisk] branch master updated: Share bookkeeping data across controllers (#2531)

2017-09-29 Thread cbickel
This is an automated email from the ASF dual-hosted git repository.

cbickel pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-openwhisk.git


The following commit(s) were added to refs/heads/master by this push:
 new 363d571  Share bookkeeping data across controllers (#2531)
363d571 is described below

commit 363d5714daf4cc87b5be220013d9edab65ffcfbc
Author: Vadim Raskin 
AuthorDate: Fri Sep 29 14:47:47 2017 +0200

Share bookkeeping data across controllers (#2531)

* Use akka distributed map to store the shared state

* Join seed nodes in the load balancer service

* Add optional auto-down-unreachable-after

* Local bookkeeping is used by default

* Update documentation
---
 ansible/group_vars/all |   9 +
 ansible/roles/controller/tasks/deploy.yml  |  15 +-
 common/scala/build.gradle  |   5 +-
 .../scala/src/main/scala/whisk/common/Config.scala |  10 +
 .../src/main/scala/whisk/core/WhiskConfig.scala|  10 +-
 core/controller/build.gradle   |   1 +
 .../controller/src/main/resources/application.conf |  27 ++
 .../scala/whisk/core/controller/Controller.scala   |   4 +-
 .../core/entitlement/ActivationThrottler.scala |  31 ++-
 .../scala/whisk/core/entitlement/Entitlement.scala |  31 ++-
 .../loadBalancer/DistributedLoadBalancerData.scala |  90 +++
 .../whisk/core/loadBalancer/LoadBalancerData.scala |  65 +
 .../core/loadBalancer/LoadBalancerService.scala|  52 ++--
 .../core/loadBalancer/LocalLoadBalancerData.scala  |  76 ++
 .../core/loadBalancer/SeedNodesProvider.scala  |  41 +++
 .../core/loadBalancer/SharedDataService.scala  |  98 +++
 docs/README.md |   1 +
 docs/deploy.md |  16 ++
 tests/build.gradle |   1 +
 .../controller/test/ControllerTestCommon.scala |   9 +-
 .../loadBalancer/test/LoadBalancerDataTests.scala  | 292 ++---
 .../loadBalancer/test/SeedNodesProviderTest.scala  |  58 
 .../loadBalancer/test/SharedDataServiceTests.scala |  91 +++
 23 files changed, 825 insertions(+), 208 deletions(-)

diff --git a/ansible/group_vars/all b/ansible/group_vars/all
index c288dbf..88ce571 100644
--- a/ansible/group_vars/all
+++ b/ansible/group_vars/all
@@ -108,6 +108,15 @@ controller:
   arguments: "{{ controller_arguments | default('') }}"
   blackboxFraction: "{{ controller_blackbox_fraction | default(0.10) }}"
   instances: "{{ groups['controllers'] | length }}"
+  localBookkeeping: "{{ controller_local_bookkeeping | default('true') }}"
+  akka:
+provider: cluster
+cluster:
+  basePort: 8000
+  host: "{{ groups['controllers'] | map('extract', hostvars, 
'ansible_host') | list }}"
+  bindPort: 2551
+  # at this moment all controllers are seed nodes
+  seedNodes: "{{ groups['controllers'] | map('extract', hostvars, 
'ansible_host') | list }}"
 
 registry:
   confdir: "{{ config_root_dir }}/registry"
diff --git a/ansible/roles/controller/tasks/deploy.yml 
b/ansible/roles/controller/tasks/deploy.yml
index 1b791ef..a805d75 100644
--- a/ansible/roles/controller/tasks/deploy.yml
+++ b/ansible/roles/controller/tasks/deploy.yml
@@ -16,6 +16,12 @@
 mode: 0777
   become: "{{ logs.dir.become }}"
 
+- name: create seed nodes list
+  set_fact:
+seed_nodes_list: "{{ seed_nodes_list | default([]) }} + [ 
\"{{item.1}}:{{controller.akka.cluster.basePort+item.0}}\" ]"
+  with_indexed_items:
+  - "{{ controller.akka.cluster.seedNodes }}"
+
 - name: (re)start controller
   docker_container:
 name: controller{{ groups['controllers'].index(inventory_hostname) }}
@@ -58,11 +64,18 @@
   "LOADBALANCER_INVOKERBUSYTHRESHOLD": "{{ invoker.busyThreshold }}"
 
   "RUNTIMES_MANIFEST": "{{ runtimesManifest | to_json }}"
+  "CONTROLLER_LOCALBOOKKEEPING": "{{ controller.localBookkeeping }}"
+  "AKKA_CLUSTER_PORT": "{{ controller.akka.cluster.basePort + 
groups['controllers'].index(inventory_hostname) }}"
+  "AKKA_CLUSTER_HOST": "{{ 
controller.akka.cluster.host[groups['controllers'].index(inventory_hostname)] 
}}"
+  "AKKA_CLUSTER_SEED_NODES": "{{seed_nodes_list | join(' ') }}"
+  "AKKA_CLUSTER_BIND_PORT": "{{ controller.akka.cluster.bindPort }}"
+  "AKKA_ACTOR_PROVIDER": "{{ controller.akka.provider }}"
 volumes:
   - "{{ whisk_logs_dir }}/controller{{ 
groups['controllers'].index(inventory_h

[incubator-openwhisk.wiki] branch master updated: Updated Contributing: Git guidelines (markdown)

2017-10-06 Thread cbickel
This is an automated email from the ASF dual-hosted git repository.

cbickel pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-openwhisk.wiki.git


The following commit(s) were added to refs/heads/master by this push:
 new 38fa342  Updated Contributing: Git guidelines (markdown)
38fa342 is described below

commit 38fa3424a4508070c403554d44ee610cf595fe52
Author: Christian Bickel 
AuthorDate: Fri Oct 6 09:52:54 2017 +0200

Updated Contributing: Git guidelines (markdown)
---
 Contributing:-Git-guidelines.md | 21 +++--
 1 file changed, 7 insertions(+), 14 deletions(-)

diff --git a/Contributing:-Git-guidelines.md b/Contributing:-Git-guidelines.md
index db27b43..8eef037 100644
--- a/Contributing:-Git-guidelines.md
+++ b/Contributing:-Git-guidelines.md
@@ -48,22 +48,15 @@ For full documentation on the scancode utility, please 
reference its README:
 
 ### Scala
 
-Scala is formatted using [scalafmt](http://scalameta.org/scalafmt/). Its 
configuration can be found in [.scalafmt.conf](../.scalafmt.conf). To 
automatically reformat **changed** Scala files upon commit, add the following 
to your `pre-commit` hook.
+Scala is formatted using [scalafmt](http://scalameta.org/scalafmt/). Its 
configuration can be found in 
[.scalafmt.conf](https://github.com/apache/incubator-openwhisk/tree/master/.scalafmt.conf).
 To automatically reformat changed Scala files upon commit, you can use
+the `git pre-commit` hook scripts in 
[tools/git](https://github.com/apache/incubator-openwhisk/tree/master/tools/git).
 Follow the instructions
+in the 
[README](https://github.com/apache/incubator-openwhisk/blob/master/tools/git/README.md).
 
-```bash
-#!/bin/sh
-
-# determine openwhisk base directory
-root="$(git rev-parse --show-toplevel)"
+For example, add the following to your `git pre-commit` hook:
 
-git diff --name-only --cached | grep '\.scala'
-if [ "$?" -eq 0 ]; then
-# reformat scala code iff a scala file changed
-$root/gradlew scalafmtAll
-fi
-
-# readd all staged files
-git add "$(git diff --name-only --cached)"
+```
+# -- Code formatting --
+/path/to/incubator-openwhisk/tools/git/pre-commit-scalafmt-native.sh
 ```
 
 ## Work with forks

-- 
To stop receiving notification emails like this one, please contact
['"commits@openwhisk.apache.org" '].


[incubator-openwhisk] branch master updated: Add batching time to BatcherTests. (#2861)

2017-10-19 Thread cbickel
This is an automated email from the ASF dual-hosted git repository.

cbickel pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-openwhisk.git


The following commit(s) were added to refs/heads/master by this push:
 new 8dd6250  Add batching time to BatcherTests. (#2861)
8dd6250 is described below

commit 8dd6250fa98aa2d85635a506e207bdffdab2c4f0
Author: Markus Thömmes 
AuthorDate: Thu Oct 19 10:44:06 2017 +0200

Add batching time to BatcherTests. (#2861)

In an environment with no parallelism, some assertions do not hold, 
which makes this test non-deterministic. Adding a bit of a delay between 
batching calls allows the batch to build up.
---
 .../whisk/core/database/test/BatcherTests.scala| 26 +++---
 1 file changed, 18 insertions(+), 8 deletions(-)

diff --git a/tests/src/test/scala/whisk/core/database/test/BatcherTests.scala 
b/tests/src/test/scala/whisk/core/database/test/BatcherTests.scala
index 51f54ba..a2eac6f 100644
--- a/tests/src/test/scala/whisk/core/database/test/BatcherTests.scala
+++ b/tests/src/test/scala/whisk/core/database/test/BatcherTests.scala
@@ -30,18 +30,24 @@ import whisk.utils.retry
 
 import scala.collection.mutable
 import scala.concurrent.duration._
-import scala.concurrent.{Await, ExecutionContext, Future, Promise}
+import scala.concurrent.{Await, Future, Promise}
 
 @RunWith(classOf[JUnitRunner])
 class BatcherTests extends FlatSpec with Matchers with WskActorSystem {
   implicit val materializer: ActorMaterializer = ActorMaterializer()
-  implicit val ec: ExecutionContext = actorSystem.dispatcher
 
   def await[V](f: Future[V]) = Await.result(f, 10.seconds)
 
   def between(start: Instant, end: Instant) =
 Duration.fromNanos(java.time.Duration.between(start, end).toNanos)
 
+  val promiseDelay = 100.milliseconds
+  def resolveDelayed(p: Promise[Unit], delay: FiniteDuration = promiseDelay) =
+akka.pattern.after(delay, actorSystem.scheduler) {
+  p.success(())
+  Future.successful(())
+}
+
   behavior of "Batcher"
 
   it should "batch based on batch size" in {
@@ -60,19 +66,23 @@ class BatcherTests extends FlatSpec with Matchers with 
WskActorSystem {
 val results = values.map(batcher.put)
 
 // First "batch"
-retry(batchOperation.calls should have size 1, 100)
-ps(0).success(())
+retry(batchOperation.calls should have size 1, (promiseDelay.toMillis * 
2).toInt)
 batchOperation.calls(0) should have size 1
 
+// Allow batch to build up
+resolveDelayed(ps(0))
+
 // Second batch
-retry(batchOperation.calls should have size 2, 100)
-ps(1).success(())
+retry(batchOperation.calls should have size 2, (promiseDelay.toMillis * 
2).toInt)
 batchOperation.calls(1) should have size 2
 
+// Allow batch to build up
+resolveDelayed(ps(1))
+
 // Third batch
-retry(batchOperation.calls should have size 3, 100)
-ps(2).success(())
+retry(batchOperation.calls should have size 3, (promiseDelay.toMillis * 
2).toInt)
 batchOperation.calls(2) should have size 2
+ps(2).success(())
 
 await(Future.sequence(results)) shouldBe values.map(transform)
   }

-- 
To stop receiving notification emails like this one, please contact
['"commits@openwhisk.apache.org" '].


[incubator-openwhisk] branch master updated: Cleanup view for entities and augment activations view. (#2760)

2017-11-09 Thread cbickel
This is an automated email from the ASF dual-hosted git repository.

cbickel pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-openwhisk.git


The following commit(s) were added to refs/heads/master by this push:
 new b605943  Cleanup view for entities and augment activations view. 
(#2760)
b605943 is described below

commit b605943856d82b4253fa4539137e4e106d2fb07b
Author: rodric rabbah 
AuthorDate: Thu Nov 9 06:45:40 2017 -0500

Cleanup view for entities and augment activations view. (#2760)

- Factor out design doc name to ansible.
- Separate index view into its own design doc. This will cut the main view 
file size in half which will make activation list faster. It replicates the 
view in a separate design doc, so the total savings are zero. The additional 
compute overhead for the ddoc though is worth considering. The upshot: we can 
adjust filters separately.
- Add package prefix if it exists to activation filter.
- Allow for filtering activations by package name.
---
 ...ign_document_for_activations_db_filters_v2.json |  9 ++
 ...isks_design_document_for_activations_db_v2.json |  9 ++
 .../whisks_design_document_for_entities_db_v2.json | 21 +
 ansible/group_vars/all |  3 +
 ansible/roles/controller/tasks/deploy.yml  |  5 +-
 ansible/roles/invoker/tasks/deploy.yml |  3 +
 ansible/tasks/recreateViews.yml|  3 +
 ansible/templates/whisk.properties.j2  |  5 +-
 .../src/main/scala/whisk/core/WhiskConfig.scala| 25 --
 .../main/scala/whisk/core/entity/EntityPath.scala  |  5 ++
 .../scala/whisk/core/entity/WhiskActivation.scala  | 60 ++---
 .../main/scala/whisk/core/entity/WhiskStore.scala  | 51 +---
 .../src/main/scala/whisk/http/ErrorResponse.scala  |  3 +-
 .../scala/whisk/core/controller/Activations.scala  | 55 ++--
 .../main/scala/whisk/core/invoker/Invoker.scala| 15 +++-
 .../core/controller/test/ActivationsApiTests.scala | 59 +++--
 .../scala/whisk/core/database/test/DbUtils.scala   | 11 ++-
 .../scala/whisk/core/entity/test/ViewTests.scala   | 97 +-
 18 files changed, 297 insertions(+), 142 deletions(-)

diff --git 
a/ansible/files/whisks_design_document_for_activations_db_filters_v2.json 
b/ansible/files/whisks_design_document_for_activations_db_filters_v2.json
new file mode 100644
index 000..14eb0b0
--- /dev/null
+++ b/ansible/files/whisks_design_document_for_activations_db_filters_v2.json
@@ -0,0 +1,9 @@
+{
+  "_id": "_design/whisks-filters.v2",
+  "language": "javascript",
+  "views": {
+"activations": {
+  "map": "function (doc) {\n  var PATHSEP = \"/\";\n  var isActivation = 
function (doc) { return (doc.activationId !== undefined) };\n  var summarize = 
function (doc) {\nvar endtime = doc.end !== 0 ? doc.end : undefined;\n
return {\nnamespace: doc.namespace,\nname: doc.name,\n
version: doc.version,\npublish: doc.publish,\nannotations: 
doc.annotations,\nactivationId: doc.activationId,\nstart: 
doc.start,\nend: endtim [...]
+}
+  }
+}
diff --git a/ansible/files/whisks_design_document_for_activations_db_v2.json 
b/ansible/files/whisks_design_document_for_activations_db_v2.json
new file mode 100644
index 000..be57879
--- /dev/null
+++ b/ansible/files/whisks_design_document_for_activations_db_v2.json
@@ -0,0 +1,9 @@
+{
+  "_id": "_design/whisks.v2",
+  "language": "javascript",
+  "views": {
+"activations": {
+  "map": "function (doc) {\n  var PATHSEP = \"/\";\n  var isActivation = 
function (doc) { return (doc.activationId !== undefined) };\n  var summarize = 
function (doc) {\nvar endtime = doc.end !== 0 ? doc.end : undefined;\n
return {\nnamespace: doc.namespace,\nname: doc.name,\n
version: doc.version,\npublish: doc.publish,\nannotations: 
doc.annotations,\nactivationId: doc.activationId,\nstart: 
doc.start,\nend: endtim [...]
+}
+  }
+}
diff --git a/ansible/files/whisks_design_document_for_entities_db_v2.json 
b/ansible/files/whisks_design_document_for_entities_db_v2.json
new file mode 100644
index 000..97ed91c
--- /dev/null
+++ b/ansible/files/whisks_design_document_for_entities_db_v2.json
@@ -0,0 +1,21 @@
+{
+  "_id": "_design/whisks.v2",
+  "language": "javascript",
+  "views": {
+"rules": {
+  "map": "function (doc) {\n  var PATHSEP = \"/\";\n  var isRule = 
function (doc) {  return (doc.trigger !== undefined) };\n  if (isRule(doc)) try 
{\nvar ns = doc.namespace.split(PATHSEP);\nvar root = ns[0];\nvar 
date = doc.

[incubator-openwhisk] branch master updated: Enable v2 views. (#2762)

2017-11-16 Thread cbickel
This is an automated email from the ASF dual-hosted git repository.

cbickel pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-openwhisk.git


The following commit(s) were added to refs/heads/master by this push:
 new e7830fd  Enable v2 views. (#2762)
e7830fd is described below

commit e7830fdb2aed831914c3ca892819b70b4ef14c6e
Author: rodric rabbah 
AuthorDate: Fri Nov 17 02:34:57 2017 -0500

Enable v2 views. (#2762)

* Enable v2 views.

* Adjust console test in REST client eventhough this test makes no sense to 
me here.
---
 ansible/group_vars/all |  6 +++---
 ansible/logs.yml   |  2 +-
 .../scala/whisk/core/entity/WhiskActivation.scala  |  4 ++--
 .../main/scala/whisk/core/entity/WhiskStore.scala  |  7 ---
 .../scala/whisk/core/controller/Activations.scala  |  2 +-
 tests/src/test/scala/common/rest/WskRest.scala | 23 +++---
 .../test/scala/system/basic/WskConsoleTests.scala  | 13 ++--
 .../src/test/scala/system/basic/WskRuleTests.scala |  4 ++--
 .../core/controller/test/ActivationsApiTests.scala | 23 ++
 9 files changed, 41 insertions(+), 43 deletions(-)

diff --git a/ansible/group_vars/all b/ansible/group_vars/all
index d25b84e..b3644b4 100644
--- a/ansible/group_vars/all
+++ b/ansible/group_vars/all
@@ -200,10 +200,10 @@ db:
   - whisk.system
   whisk:
 actions: "{{ db_prefix }}whisks"
-actions_ddoc: "whisks"
+actions_ddoc: "whisks.v2"
 activations: "{{ db_prefix }}activations"
-activations_ddoc: "whisks"
-activations_filter_ddoc: "whisks"
+activations_ddoc: "whisks.v2"
+activations_filter_ddoc: "whisks-filters.v2"
 auth: "{{ db_prefix }}subjects"
 
 apigateway:
diff --git a/ansible/logs.yml b/ansible/logs.yml
index 43dc52d..3dc1b1e 100644
--- a/ansible/logs.yml
+++ b/ansible/logs.yml
@@ -10,7 +10,7 @@
 - name: create "logs" folder
   file: path="{{ openwhisk_home }}/logs" state=directory
 - name: dump entity views
-  local_action: shell "{{ openwhisk_home }}/bin/wskadmin" db get whisks 
--docs --view whisks/{{ item }} | tail -n +2 > "{{ openwhisk_home }}/logs/db-{{ 
item }}.log"
+  local_action: shell "{{ openwhisk_home }}/bin/wskadmin" db get whisks 
--docs --view whisks.v2/{{ item }} | tail -n +2 > "{{ openwhisk_home 
}}/logs/db-{{ item }}.log"
   with_items:
 - actions
 - triggers
diff --git 
a/common/scala/src/main/scala/whisk/core/entity/WhiskActivation.scala 
b/common/scala/src/main/scala/whisk/core/entity/WhiskActivation.scala
index 4073093..596a5ec 100644
--- a/common/scala/src/main/scala/whisk/core/entity/WhiskActivation.scala
+++ b/common/scala/src/main/scala/whisk/core/entity/WhiskActivation.scala
@@ -145,8 +145,8 @@ object WhiskActivation
   // which are readily available here; rather than introduce significant 
refactoring,
   // defer this fix until WhiskConfig is refactored itself, which is planned 
to introduce
   // type safe properties
-  private val mainDdoc = 
WhiskConfig.readFromEnv(dbActivationsDesignDoc).getOrElse("whisks")
-  private val filtersDdoc = 
WhiskConfig.readFromEnv(dbActivationsFilterDesignDoc).getOrElse("whisks")
+  private val mainDdoc = 
WhiskConfig.readFromEnv(dbActivationsDesignDoc).getOrElse("whisks.v2")
+  private val filtersDdoc = 
WhiskConfig.readFromEnv(dbActivationsFilterDesignDoc).getOrElse("whisks-filters.v2")
 
   /** The main view for activations, keyed by namespace, sorted by date. */
   override lazy val view = WhiskEntityQueries.view(mainDdoc, collectionName)
diff --git a/common/scala/src/main/scala/whisk/core/entity/WhiskStore.scala 
b/common/scala/src/main/scala/whisk/core/entity/WhiskStore.scala
index b114450..3bd6367 100644
--- a/common/scala/src/main/scala/whisk/core/entity/WhiskStore.scala
+++ b/common/scala/src/main/scala/whisk/core/entity/WhiskStore.scala
@@ -40,7 +40,7 @@ import whisk.core.WhiskConfig.dbProvider
 import whisk.core.WhiskConfig.dbUsername
 import whisk.core.WhiskConfig.dbWhisk
 import whisk.core.WhiskConfig.dbWhiskDesignDoc
-import whisk.core.WhiskConfig.dbActivationsDesignDoc
+import whisk.core.WhiskConfig.{dbActivationsDesignDoc, 
dbActivationsFilterDesignDoc}
 import whisk.core.database.ArtifactStore
 import whisk.core.database.ArtifactStoreProvider
 import whisk.core.database.DocumentRevisionProvider
@@ -139,7 +139,8 @@ object WhiskActivationStore {
   dbHost -> null,
   dbPort -> null,
   dbActivations -> null,
-  dbActivationsDesignDoc -> null)
+  dbActivationsDesignDoc -> null,
+  dbActivationsFilterDesignDoc -> null)
 
   def datastore(config: WhiskConfig)(implicit system: ActorSystem, logging: 
Logging, materializer: ActorMa

[incubator-openwhisk] branch master updated: Bump idle-timeout for HTTP clients in test. (#2989)

2017-11-21 Thread cbickel
This is an automated email from the ASF dual-hosted git repository.

cbickel pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-openwhisk.git


The following commit(s) were added to refs/heads/master by this push:
 new 1a4493f  Bump idle-timeout for HTTP clients in test. (#2989)
1a4493f is described below

commit 1a4493f1facd301d34c2beb5dd011c52d3b2953c
Author: Markus Thömmes 
AuthorDate: Tue Nov 21 11:38:51 2017 +0100

Bump idle-timeout for HTTP clients in test. (#2989)

A blocking request will be open for at most 60 seconds before it falls back 
to a non-blocking request. The default idle-timeout is at 60 seconds so a racy 
failure happens in tests, where a blocking fallback is provoked.
---
 tests/src/test/resources/application.conf | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/tests/src/test/resources/application.conf 
b/tests/src/test/resources/application.conf
index 39960d7..750c19d 100644
--- a/tests/src/test/resources/application.conf
+++ b/tests/src/test/resources/application.conf
@@ -3,3 +3,6 @@ whisk.spi {
   MissingSpi = whisk.spi.MissingImpl
   MissingModule = missing.module
 }
+
+# Blocking requests fall back to non-blocking after ~60s
+akka.http.client.idle-timeout = 90 s

-- 
To stop receiving notification emails like this one, please contact
['"commits@openwhisk.apache.org" '].


[incubator-openwhisk] branch master updated: Remove old views. (#2763)

2017-11-22 Thread cbickel
This is an automated email from the ASF dual-hosted git repository.

cbickel pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-openwhisk.git


The following commit(s) were added to refs/heads/master by this push:
 new e404577  Remove old views. (#2763)
e404577 is described below

commit e404577667c695a96e51d6a1dfd9e9a6aec98844
Author: rodric rabbah 
AuthorDate: Wed Nov 22 05:40:14 2017 -0500

Remove old views. (#2763)
---
 .../whisks_design_document_for_actions_db.json | 28 --
 .../whisks_design_document_for_activations_db.json | 12 --
 ansible/tasks/recreateViews.yml|  2 --
 .../scala/whisk/core/entity/WhiskActivation.scala  | 14 +--
 4 files changed, 6 insertions(+), 50 deletions(-)

diff --git a/ansible/files/whisks_design_document_for_actions_db.json 
b/ansible/files/whisks_design_document_for_actions_db.json
deleted file mode 100644
index 2949be5..000
--- a/ansible/files/whisks_design_document_for_actions_db.json
+++ /dev/null
@@ -1,28 +0,0 @@
-{
-  "_id": "_design/whisks",
-  "views": {
-"all": {
-  "map": "function (doc) {\n  var PATHSEP = \"/\";\n\n  var isPackage = 
function (doc) {  return (doc.binding !== undefined) };\n  var isAction = 
function (doc) { return (doc.exec !== undefined) };\n  var isTrigger = function 
(doc) { return (doc.exec === undefined && doc.binding === undefined && 
doc.parameters !== undefined) };\n  var isRule = function (doc) {  return 
(doc.trigger !== undefined) };\n  \n  var collection = function (doc) {\nif 
(isPackage(doc)) return \"packages\"; [...]
-},
-"entities": {
-  "map": "function (doc) {\n  var PATHSEP = \"/\";\n\n  var isPackage = 
function (doc) {  return (doc.binding !== undefined) };\n  var isAction = 
function (doc) { return (doc.exec !== undefined) };\n  var isTrigger = function 
(doc) { return (doc.exec === undefined && doc.binding === undefined && 
doc.parameters !== undefined) };\n  var isRule = function (doc) {  return 
(doc.trigger !== undefined) };\n  \n  var collection = function (doc) {\nif 
(isPackage(doc)) return \"packages\"; [...]
-},
-"packages": {
-  "map": "function (doc) {\n  var PATHSEP = \"/\";\n\n  var isPackage = 
function (doc) {  return (doc.binding !== undefined) };\n  var isAction = 
function (doc) { return (doc.exec !== undefined) };\n  var isTrigger = function 
(doc) { return (doc.exec === undefined && doc.binding === undefined && 
doc.parameters !== undefined) };\n  var isRule = function (doc) {  return 
(doc.trigger !== undefined) };\n\n  if (isPackage(doc)) try {\nvar date = 
new Date(doc.start || doc.updated);\n   [...]
-},
-"packages-all": {
-  "reduce": "function (keys, values, rereduce) {\n  var isPublicPackage = 
function(p) { return p.publish && !p.binding; };\n\n  if (rereduce) {\n
return [].concat.apply([], values);\n  } else {\nreturn 
values.filter(isPublicPackage);\n  }\n}",
-  "map": "function (doc) {\n  var PATHSEP = \"/\";\n\n  var isPackage = 
function (doc) {  return (doc.binding !== undefined) };\n  var isAction = 
function (doc) { return (doc.exec !== undefined) };\n  var isTrigger = function 
(doc) { return (doc.exec === undefined && doc.binding === undefined && 
doc.parameters !== undefined) };\n  var isRule = function (doc) {  return 
(doc.trigger !== undefined) };\n\n  if (isPackage(doc)) try {\nvar date = 
new Date(doc.start || doc.updated);\n   [...]
-},
-"actions": {
-  "map": "function (doc) {\n  var PATHSEP = \"/\";\n\n  var isPackage = 
function (doc) {  return (doc.binding !== undefined) };\n  var isAction = 
function (doc) { return (doc.exec !== undefined) };\n  var isTrigger = function 
(doc) { return (doc.exec === undefined && doc.binding === undefined && 
doc.parameters !== undefined) };\n  var isRule = function (doc) {  return 
(doc.trigger !== undefined) };\n\n  if (isAction(doc)) try {\nvar ns = 
doc.namespace.split(PATHSEP);\nvar roo [...]
-},
-"triggers": {
-  "map": "function (doc) {\n  var PATHSEP = \"/\";\n\n  var isPackage = 
function (doc) {  return (doc.binding !== undefined) };\n  var isAction = 
function (doc) { return (doc.exec !== undefined) };\n  var isTrigger = function 
(doc) { return (doc.exec === undefined && doc.binding === undefined && 
doc.parameters !== undefined) };\n  var isRule = function (doc) {  return 
(doc.trigger !== undefined) };\n\n  if (isTrigger(doc)) try {\nvar ns = 
doc.namespace.split(PATHSEP);\nvar ro [...]
-},
-"rules": {

[incubator-openwhisk] branch master updated: Add couchdb clustering. (#2810)

2017-11-29 Thread cbickel
This is an automated email from the ASF dual-hosted git repository.

cbickel pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-openwhisk.git


The following commit(s) were added to refs/heads/master by this push:
 new e587bf8  Add couchdb clustering. (#2810)
e587bf8 is described below

commit e587bf8ec632320614f14114c4e85e67f39c06c2
Author: Dominic Kim 
AuthorDate: Wed Nov 29 21:49:36 2017 +0900

Add couchdb clustering. (#2810)

* Add couchdb clustering
---
 ansible/group_vars/all |   3 +-
 ansible/roles/couchdb/tasks/deploy.yml |  56 +-
 ansible/templates/whisk.properties.j2  |   2 +
 .../src/main/scala/whisk/core/WhiskConfig.scala|   3 +-
 tests/src/test/scala/common/WhiskProperties.java   |  16 +-
 tests/src/test/scala/ha/ShootComponentsTests.scala | 198 +++--
 6 files changed, 251 insertions(+), 27 deletions(-)

diff --git a/ansible/group_vars/all b/ansible/group_vars/all
index f1a746d..5b2ea44 100644
--- a/ansible/group_vars/all
+++ b/ansible/group_vars/all
@@ -213,6 +213,7 @@ nginx:
 # The key db.whisk.auth is the name of the authentication database where all 
keys of all users are stored.
 # The db_prefix is defined for each environment on its own. The CouchDb 
credentials are also defined for each environment on its own.
 db:
+  instances: "{{ groups['db'] | length }}"
   authkeys:
   - guest
   - whisk.system
@@ -239,7 +240,7 @@ linux:
   version: 4.4.0-31
 
 couchdb:
-  version: 2.0
+  version: 2.1
 
 docker:
   # The user to install docker for. Defaults to the ansible user if not set. 
This will be the user who is able to run
diff --git a/ansible/roles/couchdb/tasks/deploy.yml 
b/ansible/roles/couchdb/tasks/deploy.yml
index 27de148..88ec1eb 100644
--- a/ansible/roles/couchdb/tasks/deploy.yml
+++ b/ansible/roles/couchdb/tasks/deploy.yml
@@ -1,9 +1,9 @@
 ---
 # This role will run a CouchDB server on the db group
 
-- name: "Set node name to couchdb{{ groups['db'].index(inventory_hostname) }}"
+- name: "Set the coordinator to the first node"
   set_fact:
-nodeName: "couchdb{{ groups['db'].index(inventory_hostname) }}"
+coordinator: "{{ groups['db'][0] }}"
 
 - name: check if db credentials are valid for CouchDB
   fail: msg="The db provider in your {{ inventory_dir }}/group_vars/all is {{ 
db_provider }}, it has to be CouchDB, pls double check"
@@ -21,25 +21,27 @@
 volume_dir: "{{ instance.volume.fsmount | default( '/mnt/' + 
group_names|first, true ) }}:/usr/local/var/lib/couchdb"
   when: (block_device is defined) and (block_device in disk_status.stdout)
 
-- name: "pull the klaemo/couchdb:{{ couchdb.version }} image"
-  shell: "docker pull klaemo/couchdb:{{ couchdb.version }}"
+- name: "pull the apache/couchdb:{{ couchdb.version }} image"
+  shell: "docker pull apache/couchdb:{{ couchdb.version }}"
   retries: "{{ docker.pull.retries }}"
   delay: "{{ docker.pull.delay }}"
 
 - name: (re)start CouchDB
   docker_container:
 name: couchdb
-image: klaemo/couchdb:{{ couchdb.version }}
+image: apache/couchdb:{{ couchdb.version }}
 state: started
 recreate: true
 restart_policy: "{{ docker.restart.policy }}"
 volumes: "{{volume_dir | default([])}}"
 ports:
   - "{{ db_port }}:5984"
+  - "4369:4369"
+  - "9100:9100"
 env:
   COUCHDB_USER: "{{ db_username }}"
   COUCHDB_PASSWORD: "{{ db_password }}"
-  NODENAME: "{{ nodeName }}"
+  NODENAME: "{{ ansible_host }}"
 
 - name: wait until CouchDB in this host is up and running
   uri:
@@ -49,9 +51,48 @@
   retries: 12
   delay: 5
 
+- name: enable the cluster setup mode
+  uri:
+url: "{{ db_protocol }}://{{ ansible_host }}:{{ db_port }}/_cluster_setup"
+method: POST
+body: >
+{"action": "enable_cluster", "bind_address":"0.0.0.0", "username": "{{ 
db_username }}", "password":"{{ db_password }}", "port": {{ db_port }}, 
"node_count": "{{ groups['db'] | length }}", "remote_node": "{{ ansible_host 
}}", "remote_current_user": "{{ db_username }}", "remote_current_password": "{{ 
db_password }}"}
+body_format: json
+status_code: 201
+user: "{{ db_username }}"
+password: "{{ db_password }}"
+force_basic_auth: yes
+  when: inventory_hostname == coordinator
+
+- name: add remote nodes to the cluster
+  uri:
+url: "{{ db_protocol }}://{{ coordinator }}:{{ db_port }}/_cluster_setup"
+method: POST
+body: >
+{&q

[incubator-openwhisk] branch master updated: Add some logs statements to the test to hunt heisenbug #3022. (#3033)

2017-12-01 Thread cbickel
This is an automated email from the ASF dual-hosted git repository.

cbickel pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-openwhisk.git


The following commit(s) were added to refs/heads/master by this push:
 new 88c6ed2  Add some logs statements to the test to hunt heisenbug #3022. 
(#3033)
88c6ed2 is described below

commit 88c6ed2ce033c9512dfc5fabbbd21d1e6c3739e0
Author: Jeremias Werner 
AuthorDate: Fri Dec 1 11:16:48 2017 +0100

Add some logs statements to the test to hunt heisenbug #3022. (#3033)
---
 tests/src/test/scala/services/KafkaConnectorTests.scala | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/tests/src/test/scala/services/KafkaConnectorTests.scala 
b/tests/src/test/scala/services/KafkaConnectorTests.scala
index 8974be3..b3c6552 100644
--- a/tests/src/test/scala/services/KafkaConnectorTests.scala
+++ b/tests/src/test/scala/services/KafkaConnectorTests.scala
@@ -78,7 +78,10 @@ class KafkaConnectorTests extends FlatSpec with Matchers 
with WskActorSystem wit
 waitForSend: FiniteDuration,
 waitForReceive: FiniteDuration): Iterable[String] 
= {
 val start = java.lang.System.currentTimeMillis
+println(s"Send message to topic.\n")
 val sent = Await.result(producer.send(topic, message), waitForSend)
+println(s"Successfully sent message to topic: ${sent}\n")
+println(s"Receiving message from topic.\n")
 val received = consumer.peek(waitForReceive).map { case (_, _, _, msg) => 
new String(msg, "utf-8") }
 val end = java.lang.System.currentTimeMillis
 val elapsed = end - start

-- 
To stop receiving notification emails like this one, please contact
['"commits@openwhisk.apache.org" '].


[incubator-openwhisk] branch master updated: Add LogStore which stores to database and file simultaneously. (#2974)

2017-12-06 Thread cbickel
This is an automated email from the ASF dual-hosted git repository.

cbickel pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-openwhisk.git


The following commit(s) were added to refs/heads/master by this push:
 new d69f8db  Add LogStore which stores to database and file 
simultaneously. (#2974)
d69f8db is described below

commit d69f8db02be70db0da1e07614124ea2e08dd6826
Author: Markus Thömmes 
AuthorDate: Wed Dec 6 13:57:22 2017 +0100

Add LogStore which stores to database and file simultaneously. (#2974)

Adds a new LogStore which reads from the docker file in a streaming 
fashion, enhances the lines read and writes them out to an external file. This 
file can then be picked up by any asynchronous external process to decide what 
to do with those logs outside of the container lifecycle of the invoker.

Notable changes:
- Make LogStore configurable via ansible.
- Add activation and user information to the collectLogs interface.
- Renamed the existing LogStore to be more self-explaining.
---
 ansible/group_vars/all |   2 +
 ansible/roles/controller/tasks/deploy.yml  |   2 +
 ansible/roles/invoker/tasks/deploy.yml |   1 +
 common/scala/src/main/resources/reference.conf |   4 +-
 .../logging/DockerToActivationFileLogStore.scala   | 156 ++
 ...tore.scala => DockerToActivationLogStore.scala} |  19 +--
 .../containerpool/logging/LogRotatorSink.scala | 176 +
 .../core/containerpool/logging/LogStore.scala  |  10 +-
 .../whisk/core/containerpool/ContainerProxy.scala  |  32 ++--
 .../scala/whisk/core/invoker/InvokerReactive.scala |   1 +
 .../docker/test/DockerContainerTests.scala |   4 +-
 .../test/DockerToActivationFileLogStoreTests.scala |  78 +
 ...scala => DockerToActivationLogStoreTests.scala} |  19 ++-
 .../containerpool/test/ContainerProxyTests.scala   |   9 +-
 14 files changed, 473 insertions(+), 40 deletions(-)

diff --git a/ansible/group_vars/all b/ansible/group_vars/all
index 0e89cc0..7467aee 100644
--- a/ansible/group_vars/all
+++ b/ansible/group_vars/all
@@ -184,6 +184,8 @@ invoker:
   docker:
 become: "{{ invoker_docker_become | default(false) }}"
 
+userLogs:
+  spi: "{{ userLogs_spi | 
default('whisk.core.containerpool.logging.DockerToActivationLogStoreProvider') 
}}"
 
 nginx:
   confdir: "{{ config_root_dir }}/nginx"
diff --git a/ansible/roles/controller/tasks/deploy.yml 
b/ansible/roles/controller/tasks/deploy.yml
index a0cdc72..6caa545 100644
--- a/ansible/roles/controller/tasks/deploy.yml
+++ b/ansible/roles/controller/tasks/deploy.yml
@@ -94,6 +94,8 @@
 
   "CONFIG_kamon_statsd_hostname": "{{ metrics.kamon.host }}"
   "CONFIG_kamon_statsd_port": "{{ metrics.kamon.port }}"
+
+  "CONFIG_whisk_spi_LogStoreProvider": "{{ userLogs.spi }}"
 volumes:
   - "{{ whisk_logs_dir }}/controller{{ 
groups['controllers'].index(inventory_hostname) }}:/logs"
 ports:
diff --git a/ansible/roles/invoker/tasks/deploy.yml 
b/ansible/roles/invoker/tasks/deploy.yml
index 0aa0d5d..112b37b 100644
--- a/ansible/roles/invoker/tasks/deploy.yml
+++ b/ansible/roles/invoker/tasks/deploy.yml
@@ -153,6 +153,7 @@
 -e METRICS_LOG='{{ metrics.log.enabled }}'
 -e CONFIG_kamon_statsd_hostname='{{ metrics.kamon.host }}'
 -e CONFIG_kamon_statsd_port='{{ metrics.kamon.port }}'
+-e CONFIG_whisk_spi_LogStoreProvider='{{ userLogs.spi }}'
 -v /sys/fs/cgroup:/sys/fs/cgroup
 -v /run/runc:/run/runc
 -v {{ whisk_logs_dir }}/invoker{{ 
groups['invokers'].index(inventory_hostname) }}:/logs
diff --git a/common/scala/src/main/resources/reference.conf 
b/common/scala/src/main/resources/reference.conf
index bf2e694..45543e5 100644
--- a/common/scala/src/main/resources/reference.conf
+++ b/common/scala/src/main/resources/reference.conf
@@ -2,5 +2,5 @@ whisk.spi{
   ArtifactStoreProvider = whisk.core.database.CouchDbStoreProvider
   MessagingProvider = whisk.connector.kafka.KafkaMessagingProvider
   ContainerFactoryProvider = 
whisk.core.containerpool.docker.DockerContainerFactoryProvider
-  LogStoreProvider = whisk.core.containerpool.logging.DockerLogStoreProvider
-}
\ No newline at end of file
+  LogStoreProvider = 
whisk.core.containerpool.logging.DockerToActivationLogStoreProvider
+}
diff --git 
a/common/scala/src/main/scala/whisk/core/containerpool/logging/DockerToActivationFileLogStore.scala
 
b/common/scala/src/main/scala/whisk/core/containerpool/logging/DockerToActivationFileLogStore.scala
new file mode 100644
index 000..f31c320
--- /dev/null
+++ 
b/common/scala/src/main/scala/whisk/core/containerpool/logging/DockerToActivationFileLogStore.scala
@@ -0,0 +1,156 @@
+/*
+ * Licensed to the

[incubator-openwhisk] branch master updated: Add namespaceId to the activation record as well to enable correlation. (#3073)

2017-12-07 Thread cbickel
This is an automated email from the ASF dual-hosted git repository.

cbickel pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-openwhisk.git


The following commit(s) were added to refs/heads/master by this push:
 new 94200d4  Add namespaceId to the activation record as well to enable 
correlation. (#3073)
94200d4 is described below

commit 94200d4dc7a6aaae6b694fc7333690df13c2975c
Author: Markus Thömmes 
AuthorDate: Thu Dec 7 13:59:03 2017 +0100

Add namespaceId to the activation record as well to enable correlation. 
(#3073)
---
 .../logging/DockerToActivationFileLogStore.scala   | 10 +++---
 .../logging/test/DockerToActivationFileLogStoreTests.scala | 14 +++---
 2 files changed, 18 insertions(+), 6 deletions(-)

diff --git 
a/common/scala/src/main/scala/whisk/core/containerpool/logging/DockerToActivationFileLogStore.scala
 
b/common/scala/src/main/scala/whisk/core/containerpool/logging/DockerToActivationFileLogStore.scala
index f31c320..f9ec413 100644
--- 
a/common/scala/src/main/scala/whisk/core/containerpool/logging/DockerToActivationFileLogStore.scala
+++ 
b/common/scala/src/main/scala/whisk/core/containerpool/logging/DockerToActivationFileLogStore.scala
@@ -99,10 +99,14 @@ class DockerToActivationFileLogStore(system: ActorSystem, 
destinationDirectory:
 
 val logs = container.logs(action.limits.logs.asMegaBytes, 
action.exec.sentinelledLogs)(transid)
 
+// Adding the userId field to every written record, so any background 
process can properly correlate.
+val userIdField = Map("namespaceId" -> user.authkey.uuid.toJson)
+
 val additionalMetadata = Map(
   "activationId" -> activation.activationId.asString.toJson,
-  "action" -> action.fullyQualifiedName(false).asString.toJson,
-  "userId" -> user.authkey.uuid.toJson)
+  "action" -> action.fullyQualifiedName(false).asString.toJson) ++ 
userIdField
+
+val augmentedActivation = JsObject(activation.toJson.fields ++ userIdField)
 
 // Manually construct JSON fields to omit parsing the whole structure
 val metadata = ByteString("," + fieldsString(additionalMetadata))
@@ -113,7 +117,7 @@ class DockerToActivationFileLogStore(system: ActorSystem, 
destinationDirectory:
 // the closing "}", adding the fields and finally add "}\n" to the end 
again.
   .map(_.dropRight(1) ++ metadata ++ eventEnd)
   // As the last element of the stream, print the activation record.
-  .concat(Source.single(ByteString(activation.toJson.compactPrint + "\n")))
+  .concat(Source.single(ByteString(augmentedActivation.toJson.compactPrint 
+ "\n")))
   .to(writeToFile)
 
 val combined = OwSink.combine(toSeq, toFile)(Broadcast[ByteString](_))
diff --git 
a/tests/src/test/scala/whisk/core/containerpool/logging/test/DockerToActivationFileLogStoreTests.scala
 
b/tests/src/test/scala/whisk/core/containerpool/logging/test/DockerToActivationFileLogStoreTests.scala
index 1f5f22c..f142c1b 100644
--- 
a/tests/src/test/scala/whisk/core/containerpool/logging/test/DockerToActivationFileLogStoreTests.scala
+++ 
b/tests/src/test/scala/whisk/core/containerpool/logging/test/DockerToActivationFileLogStoreTests.scala
@@ -25,6 +25,7 @@ import akka.util.ByteString
 import common.{StreamLogging, WskActorSystem}
 import org.scalatest.Matchers
 import spray.json._
+import spray.json.DefaultJsonProtocol._
 import whisk.common.TransactionId
 import whisk.core.containerpool.logging.{DockerToActivationFileLogStore, 
LogLine}
 import whisk.core.entity._
@@ -41,14 +42,21 @@ class DockerToActivationFileLogStoreTests
 
   override def createStore() = new TestLogStoreTo(Sink.ignore)
 
-  def toLoggedEvent(line: LogLine, userId: UUID, activationId: ActivationId, 
actionName: FullyQualifiedEntityName) = {
+  def toLoggedEvent(line: LogLine,
+userId: UUID,
+activationId: ActivationId,
+actionName: FullyQualifiedEntityName): String = {
 val event = line.toJson.compactPrint
 val concatenated =
-  
s""","activationId":"${activationId.asString}","action":"${actionName.asString}","userId":"${userId.asString}""""
+  
s""","activationId":"${activationId.asString}","action":"${actionName.asString}","namespaceId":"${userId.asString}""""
 
 event.dropRight(1) ++ concatenated ++ "}\n"
   }
 
+  def toLoggedActivation(activation: WhiskActivation): String = {
+JsObject(activation.toJson.fields ++ Map("namespaceId" -> 
user.authkey.uuid.asString.toJson)).compactPrint + "\n"
+  }
+
   behavior of "DockerCouchDbFileLogStore"
 
   it should "read logs returned by the container,

[incubator-openwhisk] branch master updated: Disable reduce limits on all nodes. (#3101)

2017-12-12 Thread cbickel
This is an automated email from the ASF dual-hosted git repository.

cbickel pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-openwhisk.git


The following commit(s) were added to refs/heads/master by this push:
 new ecace1a  Disable reduce limits on all nodes. (#3101)
ecace1a is described below

commit ecace1ad699d87acbdd6630396aef3d9399e0c3f
Author: Dominic Kim 
AuthorDate: Wed Dec 13 16:44:15 2017 +0900

Disable reduce limits on all nodes. (#3101)
---
 ansible/roles/couchdb/tasks/deploy.yml | 1 -
 1 file changed, 1 deletion(-)

diff --git a/ansible/roles/couchdb/tasks/deploy.yml 
b/ansible/roles/couchdb/tasks/deploy.yml
index 803cfe5..542111a 100644
--- a/ansible/roles/couchdb/tasks/deploy.yml
+++ b/ansible/roles/couchdb/tasks/deploy.yml
@@ -101,4 +101,3 @@
 user: "{{ db_username }}"
 password: "{{ db_password }}"
 force_basic_auth: yes
-  when: inventory_hostname == coordinator

-- 
To stop receiving notification emails like this one, please contact
['"commits@openwhisk.apache.org" '].


[incubator-openwhisk] branch master updated: Use alpakka's FileRotator instead of the copied one. (#3102)

2018-01-08 Thread cbickel
This is an automated email from the ASF dual-hosted git repository.

cbickel pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-openwhisk.git


The following commit(s) were added to refs/heads/master by this push:
 new b485c5e  Use alpakka's FileRotator instead of the copied one. (#3102)
b485c5e is described below

commit b485c5e249ed45e0395f2e8056972adb737f0353
Author: Markus Thömmes 
AuthorDate: Mon Jan 8 12:53:45 2018 +0100

Use alpakka's FileRotator instead of the copied one. (#3102)
---
 common/scala/build.gradle  |   2 +
 .../containerpool/logging/LogRotatorSink.scala | 176 -
 core/invoker/build.gradle  |   1 -
 3 files changed, 2 insertions(+), 177 deletions(-)

diff --git a/common/scala/build.gradle b/common/scala/build.gradle
index 31781f4..2c04731 100644
--- a/common/scala/build.gradle
+++ b/common/scala/build.gradle
@@ -21,6 +21,8 @@ dependencies {
 compile 'com.typesafe.akka:akka-http-core_2.11:10.0.10'
 compile 'com.typesafe.akka:akka-http-spray-json_2.11:10.0.10'
 
+compile 'com.lightbend.akka:akka-stream-alpakka-file_2.11:0.15'
+
 compile 'ch.qos.logback:logback-classic:1.2.3'
 compile 'org.slf4j:jcl-over-slf4j:1.7.25'
 compile 'org.slf4j:log4j-over-slf4j:1.7.25'
diff --git 
a/common/scala/src/main/scala/whisk/core/containerpool/logging/LogRotatorSink.scala
 
b/common/scala/src/main/scala/whisk/core/containerpool/logging/LogRotatorSink.scala
deleted file mode 100644
index 6c5681b..000
--- 
a/common/scala/src/main/scala/whisk/core/containerpool/logging/LogRotatorSink.scala
+++ /dev/null
@@ -1,176 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-// TO BE TAKEN OUT AFTER ALPAKKA 0.15 RELEASE
-
-/*
- * Copyright (C) 2016-2017 Lightbend Inc. <http://www.lightbend.com>
- */
-
-package akka.stream.alpakka.file.scaladsl
-
-import java.nio.file.{OpenOption, Path, StandardOpenOption}
-
-import akka.Done
-import akka.stream.ActorAttributes.SupervisionStrategy
-import akka.stream._
-import akka.stream.impl.fusing.MapAsync.{Holder, NotYetThere}
-import akka.stream.scaladsl.{FileIO, Sink, Source}
-import akka.stream.stage._
-import akka.util.ByteString
-
-import scala.concurrent.{ExecutionContext, Future, Promise}
-import scala.util.{Failure, Success}
-
-object LogRotatorSink {
-  def apply(functionGeneratorFunction: () => ByteString => Option[Path],
-fileOpenOptions: Set[OpenOption] = Set(StandardOpenOption.APPEND, 
StandardOpenOption.CREATE))
-: Sink[ByteString, Future[Done]] =
-Sink.fromGraph(new LogRotatorSink(functionGeneratorFunction, 
fileOpenOptions))
-}
-
-final private[scaladsl] class LogRotatorSink(functionGeneratorFunction: () => 
ByteString => Option[Path],
- fileOpenOptions: Set[OpenOption])
-extends GraphStageWithMaterializedValue[SinkShape[ByteString], 
Future[Done]] {
-
-  val in = Inlet[ByteString]("FRotator.in")
-  override val shape = SinkShape.of(in)
-
-  override def createLogicAndMaterializedValue(inheritedAttributes: 
Attributes): (GraphStageLogic, Future[Done]) = {
-val promise = Promise[Done]()
-val logic = new GraphStageLogic(shape) {
-  val pathGeneratorFunction: ByteString => Option[Path] = 
functionGeneratorFunction()
-  var sourceOut: SubSourceOutlet[ByteString] = _
-  var fileSinkCompleted: Seq[Future[IOResult]] = Seq.empty
-  val decider =
-
inheritedAttributes.get[SupervisionStrategy].map(_.decider).getOrElse(Supervision.stoppingDecider)
-
-  def failThisStage(ex: Throwable): Unit =
-if (!promise.isCompleted) {
-  if (sourceOut != null) {
-sourceOut.fail(ex)
-  }
-  cancel(in)
-  promise.failure(ex)
-}
-
-  def generatePathOrFailPeacefully(data: ByteString): Option[Path] = {
-var ret = Option.empty[Path]
-try {
-  ret = pathGeneratorFunction(data)
-} catch {
-  case ex: Throwable =>
-failThisStage(ex)
-

[incubator-openwhisk] branch master updated: Bound docker/runc commands in their allowed runtime. (#3094)

2018-01-08 Thread cbickel
This is an automated email from the ASF dual-hosted git repository.

cbickel pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-openwhisk.git


The following commit(s) were added to refs/heads/master by this push:
 new ec8148f  Bound docker/runc commands in their allowed runtime. (#3094)
ec8148f is described below

commit ec8148fbba3bf8142853fa2aee0bcfb92fd7a5ae
Author: Markus Thömmes 
AuthorDate: Mon Jan 8 12:54:09 2018 +0100

Bound docker/runc commands in their allowed runtime. (#3094)

Docker can cause hanging commands which never finish. Essentially those 
break the invoker and it needs to be restarted to recover. This adds a timeout 
to each of those commands to detect this problem.
---
 core/invoker/src/main/resources/application.conf   | 22 
 .../core/containerpool/docker/DockerClient.scala   | 61 ++
 .../docker/DockerClientWithFileAccess.scala|  9 ++--
 .../docker/DockerContainerFactory.scala|  2 +-
 .../core/containerpool/docker/ProcessRunner.scala  | 26 ++---
 .../core/containerpool/docker/RuncClient.scala | 30 ---
 .../docker/test/DockerClientTests.scala| 25 ++---
 .../test/DockerClientWithFileAccessTests.scala | 28 ++
 .../docker/test/ProcessRunnerTests.scala   | 24 ++---
 .../docker/test/RuncClientTests.scala  | 12 +++--
 10 files changed, 173 insertions(+), 66 deletions(-)

diff --git a/core/invoker/src/main/resources/application.conf 
b/core/invoker/src/main/resources/application.conf
new file mode 100644
index 000..e7ed0a6
--- /dev/null
+++ b/core/invoker/src/main/resources/application.conf
@@ -0,0 +1,22 @@
+# common logging configuration see common scala
+include "logging"
+include "akka-http-version"
+
+whisk {
+  # Timeouts for docker commands. Set to "Inf" to disable timeout.
+  docker.timeouts {
+run: 1 minute
+rm: 1 minute
+pull: 10 minutes
+ps: 1 minute
+inspect: 1 minute
+pause: 10 seconds
+unpause: 10 seconds
+  }
+
+  # Timeouts for runc commands. Set to "Inf" to disable timeout.
+  runc.timeouts {
+pause: 10 seconds
+resume: 10 seconds
+  }
+}
\ No newline at end of file
diff --git 
a/core/invoker/src/main/scala/whisk/core/containerpool/docker/DockerClient.scala
 
b/core/invoker/src/main/scala/whisk/core/containerpool/docker/DockerClient.scala
index e6b3dab..4ed631d 100644
--- 
a/core/invoker/src/main/scala/whisk/core/containerpool/docker/DockerClient.scala
+++ 
b/core/invoker/src/main/scala/whisk/core/containerpool/docker/DockerClient.scala
@@ -22,6 +22,8 @@ import java.nio.file.Files
 import java.nio.file.Paths
 import java.util.concurrent.Semaphore
 
+import akka.actor.ActorSystem
+
 import scala.collection.concurrent.TrieMap
 import scala.concurrent.blocking
 import scala.concurrent.ExecutionContext
@@ -30,13 +32,15 @@ import scala.util.Failure
 import scala.util.Success
 import scala.util.Try
 import akka.event.Logging.ErrorLevel
-
+import pureconfig.loadConfigOrThrow
 import whisk.common.Logging
 import whisk.common.LoggingMarkers
 import whisk.common.TransactionId
 import whisk.core.containerpool.ContainerId
 import whisk.core.containerpool.ContainerAddress
 
+import scala.concurrent.duration.Duration
+
 object DockerContainerId {
 
   val containerIdRegex = """^([0-9a-f]{64})$""".r
@@ -50,6 +54,17 @@ object DockerContainerId {
 }
 
 /**
+ * Configuration for docker client command timeouts.
+ */
+case class DockerClientTimeoutConfig(run: Duration,
+ rm: Duration,
+ pull: Duration,
+ ps: Duration,
+ pause: Duration,
+ unpause: Duration,
+ inspect: Duration)
+
+/**
  * Serves as interface to the docker CLI tool.
  *
  * Be cautious with the ExecutionContext passed to this, as the
@@ -57,7 +72,10 @@ object DockerContainerId {
  *
  * You only need one instance (and you shouldn't get more).
  */
-class DockerClient(dockerHost: Option[String] = None)(executionContext: 
ExecutionContext)(implicit log: Logging)
+class DockerClient(dockerHost: Option[String] = None,
+   timeouts: DockerClientTimeoutConfig =
+ 
loadConfigOrThrow[DockerClientTimeoutConfig]("whisk.docker.timeouts"))(
+  executionContext: ExecutionContext)(implicit log: Logging, as: ActorSystem)
 extends DockerApi
 with ProcessRunner {
   implicit private val ec = executionContext
@@ -95,14 +113,12 @@ class DockerClient(dockerHost: Option[String] = 
None)(executionContext: Executio
   }
 }.flatMap { _ =>
   // Iff the semaphore was acquired successfully
-  runCmd((Seq("run", "-d") ++ args ++ Seq(image)): _*)
+  runCmd(Seq(

[incubator-openwhisk] branch master updated: Enrich entity views, and add count reducer (#3152)

2018-01-11 Thread cbickel
This is an automated email from the ASF dual-hosted git repository.

cbickel pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-openwhisk.git


The following commit(s) were added to refs/heads/master by this push:
 new c7ef96a  Enrich entity views, and add count reducer (#3152)
c7ef96a is described below

commit c7ef96a0a09eb29ec9116bead011d0657196d2ec
Author: rodric rabbah 
AuthorDate: Thu Jan 11 04:05:55 2018 -0500

Enrich entity views, and add count reducer (#3152)

* Remove reduce_limit setting for couchdb as it is no longer necessary.

* Upate design docs to provide more asset summaries and support counts.

Enrich action list view with limits and "binary" property of exec.
Enrich rule list view to include action/trigger names.
Enrich package list view to include binding information.
Add package list view for public packages.
Add count reducers.

Factor out all view, easier to then drop it without affecting rest of the 
views.
Update tests for new design docs.
---
 ansible/README.md  |  5 +---
 ...document_for_activations_db_filters_v2.1.0.json | 10 +++
 ..._design_document_for_activations_db_v2.1.0.json | 10 +++
 ...design_document_for_all_entities_db_v2.1.0.json | 10 +++
 ...sks_design_document_for_entities_db_v2.1.0.json | 26 +
 ansible/roles/couchdb/tasks/deploy.yml | 12 
 .../main/scala/whisk/core/entity/WhiskAction.scala | 24 ++--
 .../scala/whisk/core/entity/WhiskActivation.scala  |  9 --
 .../main/scala/whisk/core/entity/WhiskEntity.scala | 15 +++---
 .../scala/whisk/core/entity/WhiskPackage.scala | 21 --
 .../main/scala/whisk/core/controller/Actions.scala | 16 +++
 .../scala/whisk/core/controller/Packages.scala | 33 +-
 .../main/scala/whisk/core/controller/Rules.scala   |  8 ++
 .../scala/whisk/core/controller/Triggers.scala | 12 +++-
 .../scala/whisk/core/entity/test/ViewTests.scala   | 31 ++--
 tools/db/README.md |  1 -
 16 files changed, 140 insertions(+), 103 deletions(-)

diff --git a/ansible/README.md b/ansible/README.md
index 95bbcf9..7651e63 100644
--- a/ansible/README.md
+++ b/ansible/README.md
@@ -136,10 +136,7 @@ ansible-playbook -i environments/ prereq.yml
 **Hint:** During playbook execution the `TASK [prereq : check for pip]` can 
show as failed. This is normal if no pip is installed. The playbook will then 
move on and install pip on the target machines.
 
 ### Deploying Using CouchDB
-- Make sure your `db_local.ini` file is set up for CouchDB. See [Setup](#setup)
-- If you deploy CouchDB manually (i.e., without using the deploy CouchDB 
playbook), you must set the `reduce_limit` property on views to `false`.
-This may be done via the REST API, as in: `curl -X PUT 
${OW_DB_PROTOCOL}://${OW_DB_HOST}:${OW_DB_PORT}/_config/query_server_config/reduce_limit
 -d '"false"' -u ${OW_DB_USERNAME}:${OW_DB_PASSWORD}`.
-- Then execute
+- Make sure your `db_local.ini` file is [setup for](#setup) CouchDB then 
execute:
 
 ```
 cd 
diff --git 
a/ansible/files/whisks_design_document_for_activations_db_filters_v2.1.0.json 
b/ansible/files/whisks_design_document_for_activations_db_filters_v2.1.0.json
new file mode 100644
index 000..9e2b1f0
--- /dev/null
+++ 
b/ansible/files/whisks_design_document_for_activations_db_filters_v2.1.0.json
@@ -0,0 +1,10 @@
+{
+  "_id": "_design/whisks-filters.v2.1.0",
+  "language": "javascript",
+  "views": {
+"activations": {
+  "map": "function (doc) {\n  var PATHSEP = \"/\";\n  var isActivation = 
function (doc) { return (doc.activationId !== undefined) };\n  var summarize = 
function (doc) {\nvar endtime = doc.end !== 0 ? doc.end : undefined;\n
return {\nnamespace: doc.namespace,\nname: doc.name,\n
version: doc.version,\npublish: doc.publish,\nannotations: 
doc.annotations,\nactivationId: doc.activationId,\nstart: 
doc.start,\nend: endtim [...]
+  "reduce": "_count"
+}
+  }
+}
diff --git 
a/ansible/files/whisks_design_document_for_activations_db_v2.1.0.json 
b/ansible/files/whisks_design_document_for_activations_db_v2.1.0.json
new file mode 100644
index 000..87da6ed
--- /dev/null
+++ b/ansible/files/whisks_design_document_for_activations_db_v2.1.0.json
@@ -0,0 +1,10 @@
+{
+  "_id": "_design/whisks.v2.1.0",
+  "language": "javascript",
+  "views": {
+"activations": {
+  "map": "function (doc) {\n  var PATHSEP = \"/\";\n  var isActivation = 
function (doc) { return (doc.activationId !== undefined) };\n  var summarize = 
function (d

[incubator-openwhisk] branch master updated: Enable new views. (#3155)

2018-01-15 Thread cbickel
This is an automated email from the ASF dual-hosted git repository.

cbickel pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-openwhisk.git


The following commit(s) were added to refs/heads/master by this push:
 new 20a5a60  Enable new views. (#3155)
20a5a60 is described below

commit 20a5a6083d1630037740b4cf62f0d4e8fbc5c829
Author: rodric rabbah 
AuthorDate: Tue Jan 16 02:09:31 2018 -0500

Enable new views. (#3155)

Support ?count to retrieve document count for a collection.
Use public-package view for listing package in other namespaces.
Fix logs playbook for new view.
Update wskadmin to set reduce=false.
Use Scalatest theSameElementsAs for comparing List responses.
Remove old views.
---
 ...ign_document_for_activations_db_filters_v2.json |  9 --
 ...isks_design_document_for_activations_db_v2.json |  9 --
 .../whisks_design_document_for_entities_db_v2.json | 21 -
 ansible/logs.yml   |  2 +-
 ansible/tasks/recreateViews.yml|  7 +-
 common/scala/src/main/resources/application.conf   |  6 +-
 .../scala/whisk/core/database/ArtifactStore.scala  | 16 
 .../whisk/core/database/CouchDbRestStore.scala | 99 +++---
 .../main/scala/whisk/core/entity/WhiskAction.scala | 20 ++---
 .../scala/whisk/core/entity/WhiskActivation.scala  |  2 +-
 .../main/scala/whisk/core/entity/WhiskEntity.scala |  7 +-
 .../scala/whisk/core/entity/WhiskPackage.scala | 12 ++-
 .../main/scala/whisk/core/entity/WhiskStore.scala  | 48 ---
 .../main/scala/whisk/core/controller/Actions.scala | 22 ++---
 .../scala/whisk/core/controller/Activations.scala  | 19 +++--
 .../scala/whisk/core/controller/ApiUtils.scala | 52 +---
 .../scala/whisk/core/controller/Entities.scala | 10 +--
 .../scala/whisk/core/controller/Packages.scala | 38 -
 .../main/scala/whisk/core/controller/Rules.scala   | 16 ++--
 .../scala/whisk/core/controller/Triggers.scala | 17 ++--
 .../core/controller/test/ActionsApiTests.scala | 12 +--
 .../core/controller/test/ActivationsApiTests.scala | 63 +++---
 .../core/controller/test/PackagesApiTests.scala| 75 ++--
 .../whisk/core/controller/test/RulesApiTests.scala | 18 ++--
 .../core/controller/test/TriggersApiTests.scala| 12 +--
 .../SequenceActionApiMigrationTests.scala  |  5 +-
 .../scala/whisk/core/database/test/DbUtils.scala   | 14 +--
 tools/admin/wskadmin   |  2 +-
 28 files changed, 333 insertions(+), 300 deletions(-)

diff --git 
a/ansible/files/whisks_design_document_for_activations_db_filters_v2.json 
b/ansible/files/whisks_design_document_for_activations_db_filters_v2.json
deleted file mode 100644
index 14eb0b0..000
--- a/ansible/files/whisks_design_document_for_activations_db_filters_v2.json
+++ /dev/null
@@ -1,9 +0,0 @@
-{
-  "_id": "_design/whisks-filters.v2",
-  "language": "javascript",
-  "views": {
-"activations": {
-  "map": "function (doc) {\n  var PATHSEP = \"/\";\n  var isActivation = 
function (doc) { return (doc.activationId !== undefined) };\n  var summarize = 
function (doc) {\nvar endtime = doc.end !== 0 ? doc.end : undefined;\n
return {\nnamespace: doc.namespace,\nname: doc.name,\n
version: doc.version,\npublish: doc.publish,\nannotations: 
doc.annotations,\nactivationId: doc.activationId,\nstart: 
doc.start,\nend: endtim [...]
-}
-  }
-}
diff --git a/ansible/files/whisks_design_document_for_activations_db_v2.json 
b/ansible/files/whisks_design_document_for_activations_db_v2.json
deleted file mode 100644
index be57879..000
--- a/ansible/files/whisks_design_document_for_activations_db_v2.json
+++ /dev/null
@@ -1,9 +0,0 @@
-{
-  "_id": "_design/whisks.v2",
-  "language": "javascript",
-  "views": {
-"activations": {
-  "map": "function (doc) {\n  var PATHSEP = \"/\";\n  var isActivation = 
function (doc) { return (doc.activationId !== undefined) };\n  var summarize = 
function (doc) {\nvar endtime = doc.end !== 0 ? doc.end : undefined;\n
return {\nnamespace: doc.namespace,\nname: doc.name,\n
version: doc.version,\npublish: doc.publish,\nannotations: 
doc.annotations,\nactivationId: doc.activationId,\nstart: 
doc.start,\nend: endtim [...]
-}
-  }
-}
diff --git a/ansible/files/whisks_design_document_for_entities_db_v2.json 
b/ansible/files/whisks_design_document_for_entities_db_v2.json
deleted file mode 100644
index 97ed91c..000
--- a/ansible/files/whisks_design_document_for_entities_db_v2.json
+++ /dev/null
@@ -1,21 +0,0 @@
-{
-  "_id": "_design/whisks.v2",
-  "langua

[incubator-openwhisk] branch master updated: Configure jmxremote. (#3163)

2018-01-16 Thread cbickel
This is an automated email from the ASF dual-hosted git repository.

cbickel pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-openwhisk.git


The following commit(s) were added to refs/heads/master by this push:
 new fc98e9a  Configure jmxremote. (#3163)
fc98e9a is described below

commit fc98e9acc91191c9b38790eb38f304002f5a264d
Author: Martin Henke 
AuthorDate: Tue Jan 16 10:06:40 2018 +0100

Configure jmxremote. (#3163)

* Configure jmxremote

* Address review comments

Signed-off-by: Martin Henke 

* use conf dir to move jmx user and pw files
---
 ansible/environments/docker-machine/group_vars/all |  3 --
 ansible/environments/local/group_vars/all  |  3 --
 ansible/group_vars/all | 30 +++
 ansible/roles/controller/tasks/clean.yml   |  6 +++
 ansible/roles/controller/tasks/deploy.yml  | 45 +++---
 ansible/roles/invoker/tasks/clean.yml  |  6 +++
 ansible/roles/invoker/tasks/deploy.yml | 32 ++-
 ansible/templates/jmxremote.access.j2  |  1 +
 ansible/templates/jmxremote.password.j2|  1 +
 common/scala/.dockerignore |  1 +
 common/scala/Dockerfile|  5 ++-
 common/scala/copyJMXFiles.sh   |  7 
 common/scala/src/main/resources/logback.xml|  1 +
 core/controller/init.sh|  4 +-
 core/invoker/init.sh   |  4 +-
 15 files changed, 134 insertions(+), 15 deletions(-)

diff --git a/ansible/environments/docker-machine/group_vars/all 
b/ansible/environments/docker-machine/group_vars/all
index a09f335..efd0b56 100644
--- a/ansible/environments/docker-machine/group_vars/all
+++ b/ansible/environments/docker-machine/group_vars/all
@@ -28,9 +28,6 @@ apigw_auth_user: ""
 apigw_auth_pwd: ""
 apigw_host_v2: "http://{{ groups['apigateway']|first 
}}:{{apigateway.port.api}}/v2"
 
-controller_arguments: '-Dcom.sun.management.jmxremote 
-Dcom.sun.management.jmxremote.ssl=false 
-Dcom.sun.management.jmxremote.authenticate=false 
-Dcom.sun.management.jmxremote.port=1098'
-invoker_arguments: "{{ controller_arguments }}"
-
 invoker_allow_multiple_instances: true
 
 # Set kafka configuration
diff --git a/ansible/environments/local/group_vars/all 
b/ansible/environments/local/group_vars/all
index 9a10b00..0d8dc06 100755
--- a/ansible/environments/local/group_vars/all
+++ b/ansible/environments/local/group_vars/all
@@ -20,9 +20,6 @@ apigw_auth_user: ""
 apigw_auth_pwd: ""
 apigw_host_v2: "http://{{ groups['apigateway']|first 
}}:{{apigateway.port.api}}/v2"
 
-controller_arguments: '-Dcom.sun.management.jmxremote 
-Dcom.sun.management.jmxremote.ssl=false 
-Dcom.sun.management.jmxremote.authenticate=false 
-Dcom.sun.management.jmxremote.port=1098'
-invoker_arguments: "{{ controller_arguments }}"
-
 invoker_allow_multiple_instances: true
 
 # Set kafka configuration
diff --git a/ansible/group_vars/all b/ansible/group_vars/all
index c6957fc..31c462e 100644
--- a/ansible/group_vars/all
+++ b/ansible/group_vars/all
@@ -43,8 +43,14 @@ limits:
   firesPerMinute: "{{ limit_fires_per_minute | default(60) }}"
   sequenceMaxLength: "{{ limit_sequence_max_length | default(50) }}"
 
+controllerHostnameFromMap: "{{ groups['controllers'] | map('extract', 
hostvars, 'ansible_host') | list | first }}"
+controllerHostname: "{{ controllerHostnameFromMap | 
default(inventory_hostname) }}"
+
 # port means outer port
 controller:
+  dir:
+become: "{{ controller_dir_become | default(false) }}"
+  confdir: "{{ config_root_dir }}/controller"
   basePort: 10001
   heap: "{{ controller_heap | default('2g') }}"
   arguments: "{{ controller_arguments | default('') }}"
@@ -62,6 +68,20 @@ controller:
   # We recommend to enable HA for the controllers only, if bookkeeping data 
are shared too. (localBookkeeping: false)
   ha: "{{ controller_enable_ha | default(True) and groups['controllers'] | 
length > 1 }}"
   loglevel: "{{ controller_loglevel | default(whisk_loglevel) | 
default('INFO') }}"
+  jmxremote:
+jvmArgs:  "{% if inventory_hostname in groups['controllers'] %}
+{{ jmx.jvmCommonArgs }} -Djava.rmi.server.hostname={{ controllerHostname 
}} -Dcom.sun.management.jmxremote.rmi.port={{ jmx.rmiBasePortController + 
groups['controllers'].index(inventory_hostname) }} 
-Dcom.sun.management.jmxremote.port={{ jmx.basePortController + 
groups['controllers'].index(inventory_hostname) }}
+{% endif %}"
+
+jmx:
+  basePortController: 15000
+  rmiBasePortController: 16000
+  b

[incubator-openwhisk] branch master updated: Externalize InvokerPool initialization logic. (#3238)

2018-01-31 Thread cbickel
This is an automated email from the ASF dual-hosted git repository.

cbickel pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-openwhisk.git


The following commit(s) were added to refs/heads/master by this push:
 new 27c3e10  Externalize InvokerPool initialization logic. (#3238)
27c3e10 is described below

commit 27c3e10266bbd9e1a0a0e64aa35054c965f3d4bf
Author: Markus Thömmes 
AuthorDate: Thu Feb 1 08:43:12 2018 +0100

Externalize InvokerPool initialization logic. (#3238)

This piece of logic clutters the loadbalancer's code for no good reason. We 
should externalize it.
---
 .../core/loadBalancer/ContainerPoolBalancer.scala  | 57 +++---
 .../core/loadBalancer/InvokerSupervision.scala | 57 ++
 2 files changed, 55 insertions(+), 59 deletions(-)

diff --git 
a/core/controller/src/main/scala/whisk/core/loadBalancer/ContainerPoolBalancer.scala
 
b/core/controller/src/main/scala/whisk/core/loadBalancer/ContainerPoolBalancer.scala
index aed332e..786a94a 100644
--- 
a/core/controller/src/main/scala/whisk/core/loadBalancer/ContainerPoolBalancer.scala
+++ 
b/core/controller/src/main/scala/whisk/core/loadBalancer/ContainerPoolBalancer.scala
@@ -19,7 +19,7 @@ package whisk.core.loadBalancer
 
 import java.nio.charset.StandardCharsets
 
-import akka.actor.{ActorRefFactory, ActorSystem, Props}
+import akka.actor.{ActorSystem, Props}
 import akka.cluster.Cluster
 import akka.pattern.ask
 import akka.stream.ActorMaterializer
@@ -29,16 +29,14 @@ import pureconfig._
 import whisk.common.{Logging, LoggingMarkers, TransactionId}
 import whisk.core.WhiskConfig._
 import whisk.core.connector._
-import whisk.core.database.NoDocumentException
 import whisk.core.entity._
-import whisk.core.entity.types.EntityStore
 import whisk.core.{ConfigKeys, WhiskConfig}
 import whisk.spi.SpiLoader
 import akka.event.Logging.InfoLevel
 
 import scala.annotation.tailrec
 import scala.concurrent.duration._
-import scala.concurrent.{Await, ExecutionContext, Future, Promise}
+import scala.concurrent.{ExecutionContext, Future, Promise}
 import scala.util.{Failure, Success}
 
 case class LoadbalancerConfig(blackboxFraction: Double, invokerBusyThreshold: 
Int)
@@ -50,9 +48,6 @@ class ContainerPoolBalancer(config: WhiskConfig, 
controllerInstance: InstanceId)
 
   private val lbConfig = 
loadConfigOrThrow[LoadbalancerConfig](ConfigKeys.loadbalancer)
 
-  /** Used to manage an action for testing invoker health */ /** Used to 
manage an action for testing invoker health */
-  private val entityStore = WhiskEntityStore.datastore(config)
-
   /** The execution context for futures */
   private implicit val executionContext: ExecutionContext = 
actorSystem.dispatcher
 
@@ -168,28 +163,6 @@ class ContainerPoolBalancer(config: WhiskConfig, 
controllerInstance: InstanceId)
   })
   }
 
-  /**
-   * Creates or updates a health test action by updating the entity store.
-   * This method is intended for use on startup.
-   * @return Future that completes successfully iff the action is added to the 
database
-   */
-  private def createTestActionForInvokerHealth(db: EntityStore, action: 
WhiskAction): Future[Unit] = {
-implicit val tid = TransactionId.loadbalancer
-WhiskAction
-  .get(db, action.docid)
-  .flatMap { oldAction =>
-WhiskAction.put(db, action.revision(oldAction.rev))(tid, notifier = 
None)
-  }
-  .recover {
-case _: NoDocumentException => WhiskAction.put(db, action)(tid, 
notifier = None)
-  }
-  .map(_ => {})
-  .andThen {
-case Success(_) => logging.info(this, "test action for invoker health 
now exists")
-case Failure(e) => logging.error(this, s"error creating test action 
for invoker health: $e")
-  }
-  }
-
   /** Gets a producer which can publish messages to the kafka bus. */
   private val messagingProvider = SpiLoader.get[MessagingProvider]
   private val messageProducer = messagingProvider.getProducer(config, 
executionContext)
@@ -216,29 +189,15 @@ class ContainerPoolBalancer(config: WhiskConfig, 
controllerInstance: InstanceId)
   case Failure(e) => transid.failed(this, start, s"error on posting to 
topic $topic")
 }
   }
-  private val invokerPool = {
-// Do not create the invokerPool if it is not possible to create the 
health test action to recover the invokers.
-InvokerPool
-  .healthAction(controllerInstance)
-  .map {
-// Await the creation of the test action; on failure, this will abort 
the constructor which should
-// in turn abort the startup of the controller.
-a =>
-  Await.result(createTestActionForInvokerHealth(entityStore, a), 
1.minute)
-  }
-  .orElse {
-throw new IllegalStateException(
-  "cannot create test action for invoker health because runtime 
manifest is not valid"

[incubator-openwhisk] branch master updated: Make HA tests for controller more lenient. (#3230)

2018-02-01 Thread cbickel
This is an automated email from the ASF dual-hosted git repository.

cbickel pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-openwhisk.git


The following commit(s) were added to refs/heads/master by this push:
 new e7d2c7c  Make HA tests for controller more lenient. (#3230)
e7d2c7c is described below

commit e7d2c7cc131ce84c90c75cbeccc2f6243676f543
Author: Markus Thömmes 
AuthorDate: Thu Feb 1 09:33:37 2018 +0100

Make HA tests for controller more lenient. (#3230)

- Adjusts the value of allowed failures slightly.
- Corrects no longer valid comment.
- Uses the right value for limit calculation.
---
 tests/src/test/scala/ha/ShootComponentsTests.scala | 8 
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/tests/src/test/scala/ha/ShootComponentsTests.scala 
b/tests/src/test/scala/ha/ShootComponentsTests.scala
index c6e65dc..6c2d00c 100644
--- a/tests/src/test/scala/ha/ShootComponentsTests.scala
+++ b/tests/src/test/scala/ha/ShootComponentsTests.scala
@@ -58,7 +58,7 @@ class ShootComponentsTests
 
   // Throttle requests to the remaining controllers to avoid getting 429s. (60 
req/min)
   val amountOfControllers = 
WhiskProperties.getProperty(WhiskConfig.controllerInstances).toInt
-  val limit = 
WhiskProperties.getProperty(WhiskConfig.actionInvokeConcurrentLimit).toDouble
+  val limit = 
WhiskProperties.getProperty(WhiskConfig.actionInvokePerMinuteLimit).toDouble
   val limitPerController = limit / amountOfControllers
   val allowedRequestsPerMinute = (amountOfControllers - 1.0) * 
limitPerController
   val timeBeweenRequests = 60.seconds / allowedRequestsPerMinute
@@ -187,11 +187,11 @@ class ShootComponentsTests
   val requests = requestsBeforeRestart ++ requestsAfterRestart
 
   val unsuccessfulInvokes = requests.map(_._1).count(_ != 
TestUtils.SUCCESS_EXIT)
-  // Allow 3 failures for the 100 seconds
-  unsuccessfulInvokes should be <= 3
+  // Allow 5 failures for the 100 seconds
+  unsuccessfulInvokes should be <= 5
 
   val unsuccessfulGets = requests.map(_._2).count(_ != 
TestUtils.SUCCESS_EXIT)
-  // Only allow 1 failure in GET requests, because they are idempotent and 
they should be passed to the next controller if one crashes
+  // Allow no failures in GET requests, because they are idempotent and 
they should be passed to the next controller if one crashes
   unsuccessfulGets shouldBe 0
 
   // Check that both controllers are up

-- 
To stop receiving notification emails like this one, please contact
cbic...@apache.org.


[incubator-openwhisk] branch master updated: Refactor controller role to use variables for hostname and host index. (#3206)

2018-02-08 Thread cbickel
This is an automated email from the ASF dual-hosted git repository.

cbickel pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-openwhisk.git


The following commit(s) were added to refs/heads/master by this push:
 new 674b26c  Refactor controller role to use variables for hostname and 
host index. (#3206)
674b26c is described below

commit 674b26cf175dd5402c6e22b0f4265fda545e1c18
Author: James Dubee 
AuthorDate: Thu Feb 8 10:08:08 2018 -0500

Refactor controller role to use variables for hostname and host index. 
(#3206)

- Define variables for frequently referenced operations in the controller role
- Move controller.jmxremote.jvmArgs to deploy.yml
---
 ansible/group_vars/all|  7 --
 ansible/roles/controller/tasks/clean.yml  | 14 ---
 ansible/roles/controller/tasks/deploy.yml | 42 ++-
 3 files changed, 36 insertions(+), 27 deletions(-)

diff --git a/ansible/group_vars/all b/ansible/group_vars/all
index 92b047e..6978fb9 100644
--- a/ansible/group_vars/all
+++ b/ansible/group_vars/all
@@ -43,9 +43,6 @@ limits:
   firesPerMinute: "{{ limit_fires_per_minute | default(60) }}"
   sequenceMaxLength: "{{ limit_sequence_max_length | default(50) }}"
 
-controllerHostnameFromMap: "{{ groups['controllers'] | map('extract', 
hostvars, 'ansible_host') | list | first }}"
-controllerHostname: "{{ controllerHostnameFromMap | 
default(inventory_hostname) }}"
-
 # port means outer port
 controller:
   dir:
@@ -68,10 +65,6 @@ controller:
   # We recommend to enable HA for the controllers only, if bookkeeping data 
are shared too. (localBookkeeping: false)
   ha: "{{ controller_enable_ha | default(True) and groups['controllers'] | 
length > 1 }}"
   loglevel: "{{ controller_loglevel | default(whisk_loglevel) | 
default('INFO') }}"
-  jmxremote:
-jvmArgs:  "{% if inventory_hostname in groups['controllers'] %}
-{{ jmx.jvmCommonArgs }} -Djava.rmi.server.hostname={{ controllerHostname 
}} -Dcom.sun.management.jmxremote.rmi.port={{ jmx.rmiBasePortController + 
groups['controllers'].index(inventory_hostname) }} 
-Dcom.sun.management.jmxremote.port={{ jmx.basePortController + 
groups['controllers'].index(inventory_hostname) }}
-{% endif %}"
 
 jmx:
   basePortController: 15000
diff --git a/ansible/roles/controller/tasks/clean.yml 
b/ansible/roles/controller/tasks/clean.yml
index 231198a..c34b8e8 100644
--- a/ansible/roles/controller/tasks/clean.yml
+++ b/ansible/roles/controller/tasks/clean.yml
@@ -1,21 +1,29 @@
 ---
 # Remove controller containers.
 
+- name: get controller index
+  set_fact:
+controller_index: "{{ groups['controllers'].index(inventory_hostname) }}"
+
+- name: get controller name
+  set_fact:
+controller_name: "controller{{ controller_index }}"
+
 - name: remove controller
   docker_container:
-name: "controller{{ groups['controllers'].index(inventory_hostname) }}"
+name: "{{ controller_name }}"
 image: "{{ docker_registry }}{{ docker.image.prefix }}/controller:{{ 
docker.image.tag }}"
 state: absent
   ignore_errors: True
 
 - name: remove controller log directory
   file:
-path: "{{ whisk_logs_dir }}/controller{{ 
groups['controllers'].index(inventory_hostname) }}"
+path: "{{ whisk_logs_dir }}/{{ controller_name }}"
 state: absent
   become: "{{ logs.dir.become }}"
 
 - name: remove controller conf directory
   file:
-path: "{{ controller.confdir }}/controller{{ 
groups['controllers'].index(inventory_hostname) }}"
+path: "{{ controller.confdir }}/{{ controller_name }}"
 state: absent
   become: "{{ controller.dir.become }}"
diff --git a/ansible/roles/controller/tasks/deploy.yml 
b/ansible/roles/controller/tasks/deploy.yml
index e673438..8601a29 100644
--- a/ansible/roles/controller/tasks/deploy.yml
+++ b/ansible/roles/controller/tasks/deploy.yml
@@ -3,6 +3,14 @@
 
 - include: docker_login.yml
 
+- name: get controller index
+  set_fact:
+controller_index: "{{ groups['controllers'].index(inventory_hostname) }}"
+
+- name: get controller name
+  set_fact:
+controller_name: "controller{{ controller_index }}"
+
 - name: "pull the {{ docker.image.tag }} image of controller"
   shell: "docker pull {{ docker_registry }}{{ docker.image.prefix 
}}/controller:{{ docker.image.tag }}"
   when: docker_registry != ""
@@ -11,14 +19,14 @@
 
 - name: ensure controller log directory is created with permissions
   file:
-path: "{{ whisk_logs_dir }}/controller{{ 
groups['controllers'].index(inventory_hostname) }}"
+path: "{{ whisk_logs_dir }}/{

[incubator-openwhisk] branch master updated: Wait for logs based on intervals not based on total processing time. (#3273)

2018-02-12 Thread cbickel
This is an automated email from the ASF dual-hosted git repository.

cbickel pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-openwhisk.git


The following commit(s) were added to refs/heads/master by this push:
 new ebe7788  Wait for logs based on intervals not based on total 
processing time. (#3273)
ebe7788 is described below

commit ebe7788b5261bb845e43b74525fa2ad2cacbf80b
Author: Markus Thömmes 
AuthorDate: Mon Feb 12 10:59:28 2018 +0100

Wait for logs based on intervals not based on total processing time. (#3273)

Writing a large chunk of logs can take quite some time to process. The 
standard timeout for this process is 2 seconds today. It is bounded, because an 
action developer might break the action proxy to make sentinels not appear at 
all which would cause us to infinitely wait on sentinels.

As we process logs after an activation has run though, we can safely rely 
on the time **between** two logs not exceeding a certain threshold. That way, 
the complete processing is not bounded by some arbitrary timeout (which can 
even be too short for large volumes) and is still tight enough to exit early if 
sentinels really are missing.

Furthermore, an error line is inserted if this timeout hits to inform the 
user that something might've gone wrong.
---
 .../core/containerpool/docker/DockerContainer.scala  | 16 ++--
 .../containerpool/docker/test/DockerContainerTests.scala |  7 +++
 2 files changed, 13 insertions(+), 10 deletions(-)

diff --git 
a/core/invoker/src/main/scala/whisk/core/containerpool/docker/DockerContainer.scala
 
b/core/invoker/src/main/scala/whisk/core/containerpool/docker/DockerContainer.scala
index 265a450..5d3083c 100644
--- 
a/core/invoker/src/main/scala/whisk/core/containerpool/docker/DockerContainer.scala
+++ 
b/core/invoker/src/main/scala/whisk/core/containerpool/docker/DockerContainer.scala
@@ -18,6 +18,7 @@
 package whisk.core.containerpool.docker
 
 import java.time.Instant
+import java.util.concurrent.TimeoutException
 import java.util.concurrent.atomic.AtomicLong
 
 import akka.actor.ActorSystem
@@ -80,7 +81,7 @@ object DockerContainer {
 as: ActorSystem,
 ec: 
ExecutionContext,
 log: Logging): 
Future[DockerContainer] = {
-implicit val tid = transid
+implicit val tid: TransactionId = transid
 
 val environmentArgs = environment.flatMap {
   case (key, value) => Seq("-e", s"$key=$value")
@@ -246,18 +247,21 @@ class DockerContainer(protected val id: ContainerId,
 size
   }
   .via(new 
CompleteAfterOccurrences(_.containsSlice(DockerContainer.ActivationSentinel), 
2, waitForSentinel))
+  // As we're reading the logs after the activation has finished the 
invariant is that all loglines are already
+  // written and we mostly await them being flushed by the docker daemon. 
Therefore we can timeout based on the time
+  // between two loglines appear without relying on the log frequency in 
the action itself.
+  .idleTimeout(waitForLogs)
   .recover {
 case _: StreamLimitReachedException =>
   // While the stream has already ended by failing the limitWeighted 
stage above, we inject a truncation
   // notice downstream, which will be processed as usual. This will be 
the last element of the stream.
   ByteString(LogLine(Instant.now.toString, "stderr", 
Messages.truncateLogs(limit)).toJson.compactPrint)
-case _: OccurrencesNotFoundException | _: FramingException =>
+case _: OccurrencesNotFoundException | _: FramingException | _: 
TimeoutException =>
   // Stream has already ended and we insert a notice that data might 
be missing from the logs. While a
   // FramingException can also mean exceeding the limits, we cannot 
decide which case happened so we resort
   // to the general error message. This will be the last element of 
the stream.
   ByteString(LogLine(Instant.now.toString, "stderr", 
Messages.logFailure).toJson.compactPrint)
   }
-  .takeWithin(waitForLogs)
   }
 
   /** Delimiter used to split log-lines as written by the json-log-driver. */
@@ -279,9 +283,9 @@ class DockerContainer(protected val id: ContainerId,
  */
 class CompleteAfterOccurrences[T](isInEvent: T => Boolean, neededOccurrences: 
Int, errorOnNotEnough: Boolean)
 extends GraphStage[FlowShape[T, T]] {
-  val in = Inlet[T]("WaitForOccurances.in")
-  val out = Outlet[T]("WaitForOccurances.out")
-  override val shape = FlowShape.of(in, out)
+  val in: Inlet[T] = Inlet[T]("WaitForOccurrences.in")
+  val out: Outlet[T] = Outlet[T]("WaitForOccurrences.out")
+  override val sha

[incubator-openwhisk] branch master updated: Use proper API host and reduce logging noise. (#3276)

2018-02-13 Thread cbickel
This is an automated email from the ASF dual-hosted git repository.

cbickel pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-openwhisk.git


The following commit(s) were added to refs/heads/master by this push:
 new 28328c1  Use proper API host and reduce logging noise. (#3276)
28328c1 is described below

commit 28328c1043b5cbe0e55c4588fa43960e6c860b04
Author: Markus Thömmes 
AuthorDate: Wed Feb 14 08:08:04 2018 +0100

Use proper API host and reduce logging noise. (#3276)

Co-authored-by: cbickel 
Co-authored-by: jeremiaswerner 
---
 tests/src/test/scala/common/rest/WskRest.scala | 72 --
 1 file changed, 23 insertions(+), 49 deletions(-)

diff --git a/tests/src/test/scala/common/rest/WskRest.scala 
b/tests/src/test/scala/common/rest/WskRest.scala
index a8a9c16..6e30cd7 100644
--- a/tests/src/test/scala/common/rest/WskRest.scala
+++ b/tests/src/test/scala/common/rest/WskRest.scala
@@ -21,22 +21,19 @@ import java.io.File
 import java.time.Instant
 import java.util.Base64
 import java.security.cert.X509Certificate
+
 import org.apache.commons.io.FileUtils
 import org.scalatest.Matchers
 import org.scalatest.FlatSpec
 import org.scalatest.concurrent.ScalaFutures
 import org.scalatest.time.Span.convertDurationToSpan
-import scala.Left
-import scala.Right
+
 import scala.collection.JavaConversions.mapAsJavaMap
 import scala.collection.mutable.Buffer
 import scala.collection.immutable.Seq
 import scala.concurrent.duration.Duration
 import scala.concurrent.duration.DurationInt
-import scala.concurrent.{Future, Promise}
 import scala.language.postfixOps
-import scala.util.Failure
-import scala.util.Success
 import scala.util.Try
 import scala.util.{Failure, Success}
 import akka.http.scaladsl.model.StatusCode
@@ -53,16 +50,13 @@ import akka.http.scaladsl.model.ContentTypes
 import akka.http.scaladsl.Http
 import akka.http.scaladsl.model.headers.BasicHttpCredentials
 import akka.http.scaladsl.model.Uri
-import akka.http.scaladsl.model.Uri.Path
+import akka.http.scaladsl.model.Uri.{Path, Query}
 import akka.http.scaladsl.model.HttpMethods.DELETE
 import akka.http.scaladsl.model.HttpMethods.GET
 import akka.http.scaladsl.model.HttpMethods.POST
 import akka.http.scaladsl.model.HttpMethods.PUT
 import akka.http.scaladsl.HttpsConnectionContext
-import akka.http.scaladsl.settings.ConnectionPoolSettings
 import akka.stream.ActorMaterializer
-import akka.stream.scaladsl.{Keep, Sink, Source}
-import akka.stream.{OverflowStrategy, QueueOfferResult}
 import spray.json._
 import spray.json.DefaultJsonProtocol._
 import spray.json.JsObject
@@ -75,7 +69,6 @@ import common.HasActivation
 import common.RunWskCmd
 import common.TestUtils
 import common.TestUtils.SUCCESS_EXIT
-import common.TestUtils.DONTCARE_EXIT
 import common.TestUtils.ANY_ERROR_EXIT
 import common.TestUtils.DONTCARE_EXIT
 import common.TestUtils.RunResult
@@ -86,6 +79,7 @@ import common.WskProps
 import whisk.core.entity.ByteSize
 import whisk.utils.retry
 import javax.net.ssl.{HostnameVerifier, KeyManager, SSLContext, SSLSession, 
X509TrustManager}
+
 import com.typesafe.sslconfig.akka.AkkaSSLConfig
 import java.nio.charset.StandardCharsets
 
@@ -1230,49 +1224,29 @@ class RunWskRestCmd() extends FlatSpec with RunWskCmd 
with Matchers with ScalaFu
 else ""
   }
 
-  def request(method: HttpMethod,
-  uri: Uri,
-  body: Option[String] = None,
-  creds: BasicHttpCredentials): Future[HttpResponse] = {
+  def requestEntity(method: HttpMethod, path: Path, params: Map[String, 
String] = Map(), body: Option[String] = None)(
+implicit wp: WskProps): HttpResponse = {
+
+val creds = getBasicHttpCredentials(wp)
+
+// startsWith(http) includes https
+val hostWithScheme = if (wp.apihost.startsWith("http")) {
+  Uri(wp.apihost)
+} else {
+  Uri().withScheme("https").withHost(wp.apihost)
+}
+
 val entity = body map { b =>
   HttpEntity(ContentTypes.`application/json`, b)
 } getOrElse HttpEntity(ContentTypes.`application/json`, "")
-val request = HttpRequest(method, uri, List(Authorization(creds)), entity 
= entity)
-val connectionPoolSettings =
-  
ConnectionPoolSettings(actorSystem).withMaxOpenRequests(maxOpenRequest).withIdleTimeout(idleTimeout)
-val pool = Http().cachedHostConnectionPoolHttps[Promise[HttpResponse]](
-  host = WhiskProperties.getApiHost,
-  connectionContext = connectionContext,
-  settings = connectionPoolSettings)
-val queue = Source
-  .queue[(HttpRequest, Promise[HttpResponse])](queueSize, 
OverflowStrategy.dropNew)
-  .via(pool)
-  .toMat(Sink.foreach({
-case ((Success(resp), p)) => p.success(resp)
-case ((Failure(e), p))=> p.failure(e)
-  }))(Keep.left)
-  .run
-
-val promise = Promise[HttpResponse]
-val responsePromise = Promise[HttpResponse]()
-  

[incubator-openwhisk] branch master updated: Add a loadbalancer with local state and horizontal invoker sharding. (#3240)

2018-02-13 Thread cbickel
This is an automated email from the ASF dual-hosted git repository.

cbickel pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-openwhisk.git


The following commit(s) were added to refs/heads/master by this push:
 new a2d9781  Add a loadbalancer with local state and horizontal invoker 
sharding. (#3240)
a2d9781 is described below

commit a2d978108a97a36ad063ceb61dad63419392f076
Author: Markus Thömmes 
AuthorDate: Wed Feb 14 08:12:26 2018 +0100

Add a loadbalancer with local state and horizontal invoker sharding. (#3240)

The current ContainerPoolBalancer suffers from a couple of problems and 
bottlenecks:

1. **Inconsistent state:** The data-structures keeping the state for that 
loadbalancer are not thread-safely handled, meaning there can be queuing to 
some invokers even though there is free capacity on other invokers.
2. **Asynchronously shared state:** Sharing the state is needed for a 
high-available deployment of multiple controllers and for horizontal scale in 
those. Said state-sharing makes point 1 even worse and isn't anywhere fast 
enough to be able to efficiently schedule quick bursts.
3. **Bottlenecks:** Getting the state from the outside (like for the 
ActivationThrottle) is a very costly operation (at least in the sharedstate 
case) and actually bottlenecks the whole invocation path. Getting the current 
state of the invokers is a second bottleneck, where one request is made to the 
corresponding actor for each invocation.

This new implementation aims to solve the problems mentioned above as 
follows:

1. **All state is local:** There is no shared state. Resources are managed 
through horizontal sharding. Horizontal sharding means: The invokers' slots are 
evenly divided between the loadbalancers in existence. If we deploy 2 
loadbalancers and each invoker has 16 slots, each of the loadbalancers will 
have access to 8 slots on each invoker.
2. **Slots are given away atomically:** When scheduling an activation, the 
slot is immediately assigned to that activation (implemented through 
Semaphores). That means: Even in concurrent schedules, there will not be an 
overload on an invoker as long as there is capacity left on that invoker.
3. **Asynchronous updates of slow data:** Slowly changing data, like a 
change in the invoker's state, is asynchronously handled and updated to a local 
version of the state. Querying the state is as cheap as it can be.
---
 ansible/group_vars/all |   2 +
 ansible/roles/controller/tasks/deploy.yml  |   1 +
 .../scala/whisk/common/ForcableSemaphore.scala | 124 ++
 .../scala/whisk/core/controller/Controller.scala   |   2 +
 .../core/entitlement/ActivationThrottler.scala |  28 +-
 .../scala/whisk/core/entitlement/Entitlement.scala |  80 ++--
 .../whisk/core/entitlement/RateThrottler.scala |  11 +-
 .../core/loadBalancer/ContainerPoolBalancer.scala  |   2 +
 .../core/loadBalancer/InvokerSupervision.scala |  12 +-
 .../whisk/core/loadBalancer/LoadBalancer.scala |   5 +
 .../ShardingContainerPoolBalancer.scala| 453 +
 docs/deploy.md |   6 +
 tests/src/test/scala/limits/ThrottleTests.scala|  21 +-
 .../whisk/common/ForcableSemaphoreTests.scala  |  88 
 .../core/controller/test/RateThrottleTests.scala   |   6 +-
 .../test/ShardingContainerPoolBalancerTests.scala  | 174 
 16 files changed, 947 insertions(+), 68 deletions(-)

diff --git a/ansible/group_vars/all b/ansible/group_vars/all
index 54c2d68..8f14ea6 100644
--- a/ansible/group_vars/all
+++ b/ansible/group_vars/all
@@ -56,6 +56,8 @@ controller:
   seedNodes: "{{ groups['controllers'] | map('extract', hostvars, 
'ansible_host') | list }}"
   # We recommend to enable HA for the controllers only, if bookkeeping data 
are shared too. (localBookkeeping: false)
   ha: "{{ controller_enable_ha | default(True) and groups['controllers'] | 
length > 1 }}"
+  loadbalancer:
+spi: "{{ controller_loadbalancer_spi | default('') }}"
   loglevel: "{{ controller_loglevel | default(whisk_loglevel) | 
default('INFO') }}"
 
 jmx:
diff --git a/ansible/roles/controller/tasks/deploy.yml 
b/ansible/roles/controller/tasks/deploy.yml
index 9c19dd4..3fa07b5 100644
--- a/ansible/roles/controller/tasks/deploy.yml
+++ b/ansible/roles/controller/tasks/deploy.yml
@@ -157,6 +157,7 @@
   "CONFIG_kamon_statsd_port": "{{ metrics.kamon.port }}"
 
   "CONFIG_whisk_spi_LogStoreProvider": "{{ userLogs.spi }}"
+  "CONFIG_whisk_spi_LoadBalancerProvider": "{{ controller.loadbalancer.spi 
}}"
   
   "CONFIG_logback_log_level": "{{ controller.loglevel }}"
 
diff --git a/common/scala/src/main/scal

[incubator-openwhisk] branch master updated: Replace asString and asBool with generic method. (#3280)

2018-02-13 Thread cbickel
This is an automated email from the ASF dual-hosted git repository.

cbickel pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-openwhisk.git


The following commit(s) were added to refs/heads/master by this push:
 new 5da6003  Replace asString and asBool with generic method. (#3280)
5da6003 is described below

commit 5da6003edcb8e8d658a60f8654487ce10c17cd53
Author: Markus Thömmes 
AuthorDate: Wed Feb 14 08:17:26 2018 +0100

Replace asString and asBool with generic method. (#3280)

Mainly aims to reduce boilerplate in code which tries to extract 
non-primitive data from parameters/annotations. Any supported json-format (for 
example case classes) can be loaded safely from parameters.
---
 .../main/scala/whisk/core/entity/Parameter.scala   | 31 +++---
 .../main/scala/whisk/core/entity/WhiskAction.scala |  2 +-
 .../scala/whisk/core/controller/WebActions.scala   |  8 +++---
 3 files changed, 9 insertions(+), 32 deletions(-)

diff --git a/common/scala/src/main/scala/whisk/core/entity/Parameter.scala 
b/common/scala/src/main/scala/whisk/core/entity/Parameter.scala
index 7355a43..937913d 100644
--- a/common/scala/src/main/scala/whisk/core/entity/Parameter.scala
+++ b/common/scala/src/main/scala/whisk/core/entity/Parameter.scala
@@ -96,34 +96,11 @@ protected[core] class Parameters protected[entity] (private 
val params: Map[Para
 Some { (toJsObject.fields ++ args.fields).toJson.asJsObject }
   }
 
-  /**
-   * Retrieves parameter by name if it exists.
-   */
-  protected[core] def get(p: String): Option[JsValue] = {
-params.get(new ParameterName(p)).map(_.value)
-  }
-
-  /**
-   * Retrieves parameter by name if it exist. If value of parameter
-   * is a boolean, return its value else false.
-   */
-  protected[core] def asBool(p: String): Option[Boolean] = {
-get(p) flatMap {
-  case JsBoolean(b) => Some(b)
-  case _=> None
-}
-  }
+  /** Retrieves parameter by name if it exists. */
+  protected[core] def get(p: String): Option[JsValue] = params.get(new 
ParameterName(p)).map(_.value)
 
-  /**
-   * Retrieves parameter by name if it exist. If value of parameter
-   * is a string, return its value else none.
-   */
-  protected[core] def asString(p: String): Option[String] = {
-get(p) flatMap {
-  case JsString(s) => Some(s)
-  case _   => None
-}
-  }
+  /** Retrieves parameter by name if it exists. Returns that parameter if it 
is deserializable to {@code T} */
+  protected[core] def getAs[T: JsonReader](p: String): Option[T] = 
get(p).flatMap(js => Try(js.convertTo[T]).toOption)
 }
 
 /**
diff --git a/common/scala/src/main/scala/whisk/core/entity/WhiskAction.scala 
b/common/scala/src/main/scala/whisk/core/entity/WhiskAction.scala
index adddf81..92b7db3 100644
--- a/common/scala/src/main/scala/whisk/core/entity/WhiskAction.scala
+++ b/common/scala/src/main/scala/whisk/core/entity/WhiskAction.scala
@@ -78,7 +78,7 @@ abstract class WhiskActionLike(override val name: EntityName) 
extends WhiskEntit
 
   /** @return true iff action has appropriate annotation. */
   def hasFinalParamsAnnotation = {
-annotations.asBool(WhiskAction.finalParamsAnnotationName) getOrElse false
+annotations.getAs[Boolean](WhiskAction.finalParamsAnnotationName) 
getOrElse false
   }
 
   /** @return a Set of immutable parameternames */
diff --git 
a/core/controller/src/main/scala/whisk/core/controller/WebActions.scala 
b/core/controller/src/main/scala/whisk/core/controller/WebActions.scala
index 2642366..cd3c68c 100644
--- a/core/controller/src/main/scala/whisk/core/controller/WebActions.scala
+++ b/core/controller/src/main/scala/whisk/core/controller/WebActions.scala
@@ -483,7 +483,7 @@ trait WhiskWebActionsApi extends Directives with 
ValidateRequestSize with PostAc
   provide(fullyQualifiedActionName(actionName)) { fullActionName =>
 onComplete(verifyWebAction(fullActionName, 
onBehalfOf.isDefined)) {
   case Success((actionOwnerIdentity, action)) =>
-if 
(!action.annotations.asBool("web-custom-options").exists(identity)) {
+if 
(!action.annotations.getAs[Boolean]("web-custom-options").exists(identity)) {
   respondWithHeaders(defaultCorsResponse(context.headers)) 
{
 if (context.method == OPTIONS) {
   complete(OK, HttpEntity.Empty)
@@ -559,7 +559,7 @@ trait WhiskWebActionsApi extends Directives with 
ValidateRequestSize with PostAc
   processRequest(actionOwnerIdentity, action, extension, onBehalfOf, 
context.withBody(body), isRawHttpAction)
 }
 
-provide(action.annotations.asBool("raw-http").exists(identity)) { 
isRawHttpAction =>
+provide(action.annotations.getAs[Boolean]("raw-http").exists(identity)) { 
isRawHttpAction =>
   httpEntity match {
   

[incubator-openwhisk] branch master updated: Check all testfiles to actually contain the `RunWith` annotation. (#3290)

2018-02-16 Thread cbickel
This is an automated email from the ASF dual-hosted git repository.

cbickel pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-openwhisk.git


The following commit(s) were added to refs/heads/master by this push:
 new a8434b7  Check all testfiles to actually contain the `RunWith` 
annotation. (#3290)
a8434b7 is described below

commit a8434b7c3eccaa5b4627849ce3cbff78dc67b41e
Author: Markus Thömmes 
AuthorDate: Fri Feb 16 10:14:53 2018 +0100

Check all testfiles to actually contain the `RunWith` annotation. (#3290)

We had a couple of rogue tests in the repository which didn't have the 
annotation `RunWith`, so they haven't been run in CI/CD at all.
---
 .../{JsonArgsForTests.scala => TestJsonArgs.scala} |  2 +-
 .../whisk/core/cli/test/WskBasicUsageTests.scala   |  2 +-
 .../loadBalancer/test/LoadBalancerDataTests.scala  |  3 ++
 ...iderTest.scala => SeedNodesProviderTests.scala} |  7 ++--
 .../loadBalancer/test/SharedDataServiceTests.scala | 40 --
 tools/travis/build.sh  |  9 +
 6 files changed, 34 insertions(+), 29 deletions(-)

diff --git a/tests/src/test/scala/whisk/core/cli/test/JsonArgsForTests.scala 
b/tests/src/test/scala/whisk/core/cli/test/TestJsonArgs.scala
similarity index 99%
rename from tests/src/test/scala/whisk/core/cli/test/JsonArgsForTests.scala
rename to tests/src/test/scala/whisk/core/cli/test/TestJsonArgs.scala
index 9d8910e..474e820 100644
--- a/tests/src/test/scala/whisk/core/cli/test/JsonArgsForTests.scala
+++ b/tests/src/test/scala/whisk/core/cli/test/TestJsonArgs.scala
@@ -23,7 +23,7 @@ import spray.json.JsString
 import spray.json.JsNumber
 import spray.json.JsBoolean
 
-object JsonArgsForTests {
+object TestJsonArgs {
 
   def getInvalidJSONInput =
 Seq(
diff --git a/tests/src/test/scala/whisk/core/cli/test/WskBasicUsageTests.scala 
b/tests/src/test/scala/whisk/core/cli/test/WskBasicUsageTests.scala
index 970d569..925c098 100644
--- a/tests/src/test/scala/whisk/core/cli/test/WskBasicUsageTests.scala
+++ b/tests/src/test/scala/whisk/core/cli/test/WskBasicUsageTests.scala
@@ -45,7 +45,7 @@ import whisk.core.entity.LogLimit._
 import whisk.core.entity.MemoryLimit._
 import whisk.core.entity.TimeLimit._
 import whisk.core.entity.size.SizeInt
-import JsonArgsForTests._
+import TestJsonArgs._
 import whisk.http.Messages
 
 /**
diff --git 
a/tests/src/test/scala/whisk/core/loadBalancer/test/LoadBalancerDataTests.scala 
b/tests/src/test/scala/whisk/core/loadBalancer/test/LoadBalancerDataTests.scala
index 9afa67f..b60989a 100644
--- 
a/tests/src/test/scala/whisk/core/loadBalancer/test/LoadBalancerDataTests.scala
+++ 
b/tests/src/test/scala/whisk/core/loadBalancer/test/LoadBalancerDataTests.scala
@@ -21,6 +21,8 @@ import akka.actor.ActorSystem
 import akka.actor.Cancellable
 import com.typesafe.config.{ConfigFactory, ConfigValueFactory}
 import common.StreamLogging
+import org.junit.runner.RunWith
+import org.scalatest.junit.JUnitRunner
 import org.scalatest.{FlatSpec, Matchers}
 import whisk.core.entity.{ActivationId, UUID, WhiskActivation}
 import whisk.core.loadBalancer.{ActivationEntry, DistributedLoadBalancerData, 
LocalLoadBalancerData}
@@ -30,6 +32,7 @@ import whisk.core.entity.InstanceId
 
 import scala.concurrent.duration._
 
+@RunWith(classOf[JUnitRunner])
 class LoadBalancerDataTests extends FlatSpec with Matchers with StreamLogging {
   final val emptyCancellable: Cancellable = new Cancellable {
 def isCancelled = false
diff --git 
a/tests/src/test/scala/whisk/core/loadBalancer/test/SeedNodesProviderTest.scala 
b/tests/src/test/scala/whisk/core/loadBalancer/test/SeedNodesProviderTests.scala
similarity index 91%
rename from 
tests/src/test/scala/whisk/core/loadBalancer/test/SeedNodesProviderTest.scala
rename to 
tests/src/test/scala/whisk/core/loadBalancer/test/SeedNodesProviderTests.scala
index 345c885..ef4ae85 100644
--- 
a/tests/src/test/scala/whisk/core/loadBalancer/test/SeedNodesProviderTest.scala
+++ 
b/tests/src/test/scala/whisk/core/loadBalancer/test/SeedNodesProviderTests.scala
@@ -18,10 +18,13 @@
 package whisk.core.loadBalancer.test
 
 import akka.actor.Address
+import org.junit.runner.RunWith
+import org.scalatest.junit.JUnitRunner
 import org.scalatest.{FlatSpec, Matchers}
-import whisk.core.loadBalancer.{StaticSeedNodesProvider}
+import whisk.core.loadBalancer.StaticSeedNodesProvider
 
-class SeedNodesProviderTest extends FlatSpec with Matchers {
+@RunWith(classOf[JUnitRunner])
+class SeedNodesProviderTests extends FlatSpec with Matchers {
 
   val actorSystemName = "controller-actor-system"
   val host = "192.168.99.100"
diff --git 
a/tests/src/test/scala/whisk/core/loadBalancer/test/SharedDataServiceTests.scala
 
b/tests/src/test/scala/whisk/core/loadBalancer/test/SharedDataServiceTests.scala
index 3961e53..b3b37a7 100644
--- 
a/tests/src/test/scala/whisk/core/loadBalancer/test/SharedDataServiceTests.scala
++

[incubator-openwhisk] branch master updated: Bump swift test-timeout to 2 minutes. (#3296)

2018-02-16 Thread cbickel
This is an automated email from the ASF dual-hosted git repository.

cbickel pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-openwhisk.git


The following commit(s) were added to refs/heads/master by this push:
 new 43a2a0f  Bump swift test-timeout to 2 minutes. (#3296)
43a2a0f is described below

commit 43a2a0f92a65c1d0510c693fef77a774cabe29f7
Author: Markus Thömmes 
AuthorDate: Fri Feb 16 15:37:03 2018 +0100

Bump swift test-timeout to 2 minutes. (#3296)

- This test is prone to errors because compiling swift apparently is 
subject to a lot of variance (seen up to 1 minute).
- Refactor of some test helpers.
---
 tests/src/test/scala/common/WskTestHelpers.scala   | 34 +++---
 .../scala/whisk/core/cli/test/Swift311Tests.scala  |  2 +-
 2 files changed, 25 insertions(+), 11 deletions(-)

diff --git a/tests/src/test/scala/common/WskTestHelpers.scala 
b/tests/src/test/scala/common/WskTestHelpers.scala
index 5784824..c9073f6 100644
--- a/tests/src/test/scala/common/WskTestHelpers.scala
+++ b/tests/src/test/scala/common/WskTestHelpers.scala
@@ -223,16 +223,14 @@ trait WskTestHelpers extends Matchers {
  totalWait: Duration)(check: ActivationResult => 
Unit)(implicit wskprops: WskProps): Unit = {
 val id = activationId
 val activation = wsk.waitForActivation(id, initialWait, pollPeriod, 
totalWait)
-if (activation.isLeft) {
-  assert(false, s"error waiting for activation $id: 
${activation.left.get}")
-} else
-  try {
-check(activation.right.get.convertTo[ActivationResult])
-  } catch {
-case error: Throwable =>
-  println(s"check failed for activation $id: ${activation.right.get}")
-  throw error
-  }
+
+activation match {
+  case Left(reason) => fail(s"error waiting for activation $id for 
$totalWait: $reason")
+  case Right(result) =>
+withRethrowingPrint(s"check failed for activation $id: $result") {
+  check(result.convertTo[ActivationResult])
+}
+}
   }
 
   /**
@@ -282,6 +280,22 @@ trait WskTestHelpers extends Matchers {
 }
   }
 
+  /**
+   * Prints the given information iff the inner test fails. Rethrows the tests 
exception to get a meaningful
+   * stacktrace.
+   *
+   * @param information additional information to print
+   * @param test test to run
+   */
+  def withRethrowingPrint(information: String)(test: => Unit): Unit = {
+try test
+catch {
+  case error: Throwable =>
+println(information)
+throw error
+}
+  }
+
   def removeCLIHeader(response: String): String = {
 if (response.contains("\n")) response.substring(response.indexOf("\n")) 
else response
   }
diff --git a/tests/src/test/scala/whisk/core/cli/test/Swift311Tests.scala 
b/tests/src/test/scala/whisk/core/cli/test/Swift311Tests.scala
index b0b1fee..f43778f 100644
--- a/tests/src/test/scala/whisk/core/cli/test/Swift311Tests.scala
+++ b/tests/src/test/scala/whisk/core/cli/test/Swift311Tests.scala
@@ -35,7 +35,7 @@ class Swift311Tests extends TestHelpers with WskTestHelpers 
with Matchers {
 
   implicit val wskprops = WskProps()
   val wsk = new WskRest
-  val activationPollDuration = 60 seconds
+  val activationPollDuration = 2.minutes
   val defaultJsAction = Some(TestUtils.getTestActionFilename("hello.js"))
 
   lazy val runtimeContainer = "swift:3.1.1"

-- 
To stop receiving notification emails like this one, please contact
cbic...@apache.org.


[incubator-openwhisk] branch master updated: Actually retry package remove on conflict. (#3303)

2018-02-19 Thread cbickel
This is an automated email from the ASF dual-hosted git repository.

cbickel pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-openwhisk.git


The following commit(s) were added to refs/heads/master by this push:
 new 94ed1db  Actually retry package remove on conflict. (#3303)
94ed1db is described below

commit 94ed1db74625b0084216b7b493082b7d28135b3f
Author: Markus Thömmes 
AuthorDate: Tue Feb 20 08:11:13 2018 +0100

Actually retry package remove on conflict. (#3303)

The AssetHelper is supposed to retry a package deletion if it returns a 
conflict. This can happen if the package contained other entities (like an 
action), which got deleted first but the package's view does not yet reflect 
that deletion. The DELETE call will then complain about "entities still in 
package". A retry resolves that issue.

- Some cleanup of the helpers file.
- Refactoring of the annotations helper.
---
 tests/src/test/scala/common/WskTestHelpers.scala | 30 
 1 file changed, 15 insertions(+), 15 deletions(-)

diff --git a/tests/src/test/scala/common/WskTestHelpers.scala 
b/tests/src/test/scala/common/WskTestHelpers.scala
index c9073f6..dbd03ca 100644
--- a/tests/src/test/scala/common/WskTestHelpers.scala
+++ b/tests/src/test/scala/common/WskTestHelpers.scala
@@ -28,6 +28,7 @@ import scala.concurrent.duration.Duration
 import scala.concurrent.duration.DurationInt
 
 import spray.json._
+import spray.json.DefaultJsonProtocol._
 
 import TestUtils.RunResult
 import TestUtils.CONFLICT
@@ -56,7 +57,7 @@ object ActivationResponse extends DefaultJsonProtocol {
  * @param start an Instant to save the start time of activation
  * @param end an Instant to save the end time of activation
  * @param duration a Long to save the duration of the activation
- * @param cases String to save the cause of failure if the activation fails
+ * @param cause String to save the cause of failure if the activation fails
  * @param annotations a list of JSON objects to save the annotations of the 
activation
  */
 case class ActivationResult(activationId: String,
@@ -68,15 +69,10 @@ case class ActivationResult(activationId: String,
 cause: Option[String],
 annotations: Option[List[JsObject]]) {
 
-  def getAnnotationValue(key: String): Option[JsValue] = {
-Try {
-  val annotation = annotations.get.filter(x => x.getFields("key")(0) == 
JsString(key))
-  assert(annotation.size == 1) // only one annotation with this value
-  val value = annotation(0).getFields("value")
-  assert(value.size == 1)
-  value(0)
-}.toOption
-  }
+  def getAnnotationValue(key: String): Option[JsValue] =
+annotations
+  .flatMap(_.find(_.fields("key").convertTo[String] == key))
+  .map(_.fields("value"))
 }
 
 object ActivationResult extends DefaultJsonProtocol {
@@ -154,7 +150,7 @@ trait WskTestHelpers extends Matchers {
* list that is iterated at the end of the test so that these entities are 
deleted
* (from most recently created to oldest).
*/
-  def withAssetCleaner(wskprops: WskProps)(test: (WskProps, AssetCleaner) => 
Any) = {
+  def withAssetCleaner(wskprops: WskProps)(test: (WskProps, AssetCleaner) => 
Any): Unit = {
 // create new asset list to track what must be deleted after test completes
 val assetsToDeleteAfterTest = new Assets()
 
@@ -168,14 +164,18 @@ trait WskTestHelpers extends Matchers {
 } finally {
   // delete assets in reverse order so that was created last is deleted 
first
   val deletedAll = assetsToDeleteAfterTest.reverse map {
-case ((cli, n, delete)) =>
+case (cli, n, delete) =>
   n -> Try {
 cli match {
   case _: BasePackage if delete =>
-val rr = cli.delete(n)(wskprops)
+// sanitize ignores the exit code, so we can inspect the 
actual result and retry accordingly
+val rr = cli.sanitize(n)(wskprops)
 rr.exitCode match {
   case CONFLICT | StatusCodes.Conflict.intValue =>
-whisk.utils.retry(cli.delete(n)(wskprops), 5, 
Some(1.second))
+whisk.utils.retry({
+  println("package deletion conflict, view computation 
delay likely, retrying...")
+  cli.delete(n)(wskprops)
+}, 5, Some(1.second))
   case _ => rr
 }
   case _ => if (delete) cli.delete(n)(wskprops) else 
cli.sanitize(n)(wskprops)
@@ -260,7 +260,7 @@ trait WskTestHelpers extends Matchers {
   check(parsed)
 } catch {
   case error: Throwable =>
-println(s"check failed for activations $activationIds: ${parsed}")
+println(s"check failed for activations $activationIds: $p

[incubator-openwhisk] branch master updated: Rework rule tests to not rely on views anymore. (#3309)

2018-02-21 Thread cbickel
This is an automated email from the ASF dual-hosted git repository.

cbickel pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-openwhisk.git


The following commit(s) were added to refs/heads/master by this push:
 new a6fd056  Rework rule tests to not rely on views anymore. (#3309)
a6fd056 is described below

commit a6fd056c92683ce42b662c6cb24cd50d87f2cbc2
Author: Markus Thömmes 
AuthorDate: Wed Feb 21 10:41:43 2018 +0100

Rework rule tests to not rely on views anymore. (#3309)

Relying on views is brittle and will fail if the database is under high 
load. We don't need to rely on those anymore.
---
 tests/src/test/scala/common/WskTestHelpers.scala   |  40 +---
 .../src/test/scala/system/basic/WskRuleTests.scala | 230 +
 .../test/scala/system/basic/WskSequenceTests.scala |   8 +-
 3 files changed, 117 insertions(+), 161 deletions(-)

diff --git a/tests/src/test/scala/common/WskTestHelpers.scala 
b/tests/src/test/scala/common/WskTestHelpers.scala
index eb4bad6..5be175c 100644
--- a/tests/src/test/scala/common/WskTestHelpers.scala
+++ b/tests/src/test/scala/common/WskTestHelpers.scala
@@ -119,6 +119,12 @@ object ActivationResult extends DefaultJsonProtocol {
   }
 }
 
+/** The result of a rule-activation written into the trigger activation */
+case class RuleActivationResult(statusCode: Int, success: Boolean, 
activationId: String, action: String)
+object RuleActivationResult extends DefaultJsonProtocol {
+  implicit val serdes = jsonFormat4(RuleActivationResult.apply)
+}
+
 /**
  * Test fixture to ease cleaning of whisk entities created during testing.
  *
@@ -232,37 +238,9 @@ trait WskTestHelpers extends Matchers {
 }
 }
   }
-
-  /**
-   * Polls until it finds {@code N} activationIds from an entity. Asserts the 
count
-   * of the activationIds actually equal {@code N}. Takes a {@code since} 
parameter
-   * defining the oldest activationId to consider valid.
-   */
-  def withActivationsFromEntity(
-wsk: BaseActivation,
-entity: String,
-N: Int = 1,
-since: Option[Instant] = None,
-pollPeriod: Duration = 1.second,
-totalWait: Duration = 60.seconds)(check: Seq[ActivationResult] => 
Unit)(implicit wskprops: WskProps): Unit = {
-
-val activationIds =
-  wsk.pollFor(N, Some(entity), since = since, retries = (totalWait / 
pollPeriod).toInt, pollPeriod = pollPeriod)
-withClue(
-  s"expecting $N activations matching '$entity' name since $since but 
found ${activationIds.mkString(",")} instead") {
-  activationIds.length shouldBe N
-}
-
-val parsed = activationIds.map { id =>
-  wsk.parseJsonString(wsk.get(Some(id)).stdout).convertTo[ActivationResult]
-}
-try {
-  check(parsed)
-} catch {
-  case error: Throwable =>
-println(s"check failed for activations $activationIds: $parsed")
-throw error
-}
+  def withActivation(wsk: BaseActivation, activationId: String)(check: 
ActivationResult => Unit)(
+implicit wskprops: WskProps): Unit = {
+withActivation(wsk, activationId, 1.second, 1.second, 60.seconds)(check)
   }
 
   /**
diff --git a/tests/src/test/scala/system/basic/WskRuleTests.scala 
b/tests/src/test/scala/system/basic/WskRuleTests.scala
index 62890b7..bd30bc7 100644
--- a/tests/src/test/scala/system/basic/WskRuleTests.scala
+++ b/tests/src/test/scala/system/basic/WskRuleTests.scala
@@ -25,6 +25,7 @@ import common.TestUtils.RunResult
 import common.BaseWsk
 import common.WskProps
 import common.WskTestHelpers
+import common.RuleActivationResult
 import spray.json._
 import spray.json.DefaultJsonProtocol._
 import java.time.Instant
@@ -40,14 +41,6 @@ abstract class WskRuleTests extends TestHelpers with 
WskTestHelpers {
   val testResult = JsObject("count" -> testString.split(" ").length.toJson)
 
   /**
-   * Invoker clock skew can sometimes make it appear as if an action was 
invoked
-   * _before_ the trigger was fired. The "fudge factor" below allows the test 
to look
-   * for action activations that occur starting at most this amount of time 
before
-   * the trigger was fired.
-   */
-  val activationTimeSkewFactorMs = 500
-
-  /**
* Sets up trigger -> rule -> action triplets. Deduplicates triggers and 
rules
* and links it all up.
*
@@ -56,7 +49,7 @@ abstract class WskRuleTests extends TestHelpers with 
WskTestHelpers {
*   where the action name for the created action is allowed to differ from 
that used by the rule binding
*   for cases that reference actions in a package binding.
*/
-  def ruleSetup(rules: Seq[(String, String, (String, String, String))], 
assetHelper: AssetCleaner) = {
+  def ruleSetup(rules: Seq[(String, String, (String, String, String))], 
assetHelper: AssetCleaner): Unit = {
 val triggers = rules.map(_._2).distinct
 val actions = rules.map(_._3).d

[incubator-openwhisk] branch master updated: Add retry in updating-rule-test. (#3322)

2018-02-22 Thread cbickel
This is an automated email from the ASF dual-hosted git repository.

cbickel pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-openwhisk.git


The following commit(s) were added to refs/heads/master by this push:
 new 994fbe8  Add retry in updating-rule-test. (#3322)
994fbe8 is described below

commit 994fbe8df3e82ead318d8df893393bfaa859dc49
Author: Markus Thömmes 
AuthorDate: Thu Feb 22 13:04:15 2018 +0100

Add retry in updating-rule-test. (#3322)

Updating a rule causes a cache-invalidation, which needs to propagate to 
other controllers before a subsequent get is guaranteed to yield the correct 
result.
---
 .../src/test/scala/system/basic/WskRuleTests.scala | 26 +++---
 1 file changed, 18 insertions(+), 8 deletions(-)

diff --git a/tests/src/test/scala/system/basic/WskRuleTests.scala 
b/tests/src/test/scala/system/basic/WskRuleTests.scala
index bd30bc7..c0fc9d0 100644
--- a/tests/src/test/scala/system/basic/WskRuleTests.scala
+++ b/tests/src/test/scala/system/basic/WskRuleTests.scala
@@ -29,6 +29,8 @@ import common.RuleActivationResult
 import spray.json._
 import spray.json.DefaultJsonProtocol._
 import java.time.Instant
+import whisk.utils.retry
+import scala.concurrent.duration._
 
 @RunWith(classOf[JUnitRunner])
 abstract class WskRuleTests extends TestHelpers with WskTestHelpers {
@@ -99,14 +101,22 @@ abstract class WskRuleTests extends TestHelpers with 
WskTestHelpers {
 statusPermutations.foreach {
   case (trigger, status) =>
 if (status == active) wsk.rule.enable(ruleName) else 
wsk.rule.disable(ruleName)
-wsk.rule
-  .create(ruleName, trigger, actionName, update = true)
-  .stdout
-  .parseJson
-  .asJsObject
-  .fields
-  .get("status") shouldBe status
-
wsk.rule.get(ruleName).stdout.parseJson.asJsObject.fields.get("status") 
shouldBe status
+
+// Needs to be retried since the enable/disable causes a cache 
invalidation which needs to propagate first
+retry(
+  {
+wsk.rule
+  .create(ruleName, trigger, actionName, update = true)
+  .stdout
+  .parseJson
+  .asJsObject
+  .fields
+  .get("status") shouldBe status
+
+
wsk.rule.get(ruleName).stdout.parseJson.asJsObject.fields.get("status") 
shouldBe status
+  },
+  10,
+  Some(1.second))
 }
   }
 

-- 
To stop receiving notification emails like this one, please contact
cbic...@apache.org.


[incubator-openwhisk] branch master updated: Put active-ack consumers in their own consumer-groups. (#3337)

2018-02-26 Thread cbickel
This is an automated email from the ASF dual-hosted git repository.

cbickel pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-openwhisk.git


The following commit(s) were added to refs/heads/master by this push:
 new c184cc6  Put active-ack consumers in their own consumer-groups. (#3337)
c184cc6 is described below

commit c184cc6fdabbe857bfc1bd33aa7acda07179d1e5
Author: Markus Thömmes 
AuthorDate: Mon Feb 26 17:26:12 2018 +0100

Put active-ack consumers in their own consumer-groups. (#3337)

Just like with the invoker consumers, it doesn't make sense to have those 
in one group, as a crash of one will cause a rebalancing pause for the other.
---
 .../scala/whisk/core/loadBalancer/ContainerPoolBalancer.scala | 8 +++-
 .../whisk/core/loadBalancer/ShardingContainerPoolBalancer.scala   | 7 ++-
 2 files changed, 5 insertions(+), 10 deletions(-)

diff --git 
a/core/controller/src/main/scala/whisk/core/loadBalancer/ContainerPoolBalancer.scala
 
b/core/controller/src/main/scala/whisk/core/loadBalancer/ContainerPoolBalancer.scala
index dfa57bb..de2e56e 100644
--- 
a/core/controller/src/main/scala/whisk/core/loadBalancer/ContainerPoolBalancer.scala
+++ 
b/core/controller/src/main/scala/whisk/core/loadBalancer/ContainerPoolBalancer.scala
@@ -205,14 +205,12 @@ class ContainerPoolBalancer(config: WhiskConfig, 
controllerInstance: InstanceId)
* Subscribes to active acks (completion messages from the invokers), and
* registers a handler for received active acks from invokers.
*/
+  val activeAckTopic = s"completed${controllerInstance.toInt}"
   val maxActiveAcksPerPoll = 128
   val activeAckPollDuration = 1.second
   private val activeAckConsumer =
-messagingProvider.getConsumer(
-  config,
-  "completions",
-  s"completed${controllerInstance.toInt}",
-  maxPeek = maxActiveAcksPerPoll)
+messagingProvider.getConsumer(config, activeAckTopic, activeAckTopic, 
maxPeek = maxActiveAcksPerPoll)
+
   val activationFeed = actorSystem.actorOf(Props {
 new MessageFeed(
   "activeack",
diff --git 
a/core/controller/src/main/scala/whisk/core/loadBalancer/ShardingContainerPoolBalancer.scala
 
b/core/controller/src/main/scala/whisk/core/loadBalancer/ShardingContainerPoolBalancer.scala
index 607670d..9b7aaec 100644
--- 
a/core/controller/src/main/scala/whisk/core/loadBalancer/ShardingContainerPoolBalancer.scala
+++ 
b/core/controller/src/main/scala/whisk/core/loadBalancer/ShardingContainerPoolBalancer.scala
@@ -205,14 +205,11 @@ class ShardingContainerPoolBalancer(config: WhiskConfig, 
controllerInstance: Ins
* Subscribes to active acks (completion messages from the invokers), and
* registers a handler for received active acks from invokers.
*/
+  private val activeAckTopic = s"completed${controllerInstance.toInt}"
   private val maxActiveAcksPerPoll = 128
   private val activeAckPollDuration = 1.second
   private val activeAckConsumer =
-messagingProvider.getConsumer(
-  config,
-  "completions",
-  s"completed${controllerInstance.toInt}",
-  maxPeek = maxActiveAcksPerPoll)
+messagingProvider.getConsumer(config, activeAckTopic, activeAckTopic, 
maxPeek = maxActiveAcksPerPoll)
 
   private val activationFeed = actorSystem.actorOf(Props {
 new MessageFeed(

-- 
To stop receiving notification emails like this one, please contact
cbic...@apache.org.


[incubator-openwhisk] branch master updated: Narrow exclude to run needed tests. (#3367)

2018-02-28 Thread cbickel
This is an automated email from the ASF dual-hosted git repository.

cbickel pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-openwhisk.git


The following commit(s) were added to refs/heads/master by this push:
 new e202c1f  Narrow exclude to run needed tests. (#3367)
e202c1f is described below

commit e202c1f66cf8edf9ce925152fe9f2e162b8b7a53
Author: Markus Thömmes 
AuthorDate: Wed Feb 28 12:54:13 2018 +0100

Narrow exclude to run needed tests. (#3367)

Some of our important tests do **not** run today because the exclude of 
"*Cli*" also covers "Client" tests, which is not expected.
---
 tests/build.gradle | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tests/build.gradle b/tests/build.gradle
index e31f3ff..9d68ed2 100644
--- a/tests/build.gradle
+++ b/tests/build.gradle
@@ -27,7 +27,7 @@ task testLean(type: Test) {
 exclude '**/*ThrottleTests*'
 exclude '**/MaxActionDurationTests*'
 exclude '**/*ApiGwTests*'
-exclude '**/*Cli*'
+exclude '**/*WskCli*'
 }
 
 task testLeanCli(type: Test) {

-- 
To stop receiving notification emails like this one, please contact
cbic...@apache.org.


[incubator-openwhisk] branch master updated: Soften DockerExampleContainerTests. (#3368)

2018-02-28 Thread cbickel
This is an automated email from the ASF dual-hosted git repository.

cbickel pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-openwhisk.git


The following commit(s) were added to refs/heads/master by this push:
 new cc72113  Soften DockerExampleContainerTests. (#3368)
cc72113 is described below

commit cc72113d33562a1297c90f2a32d07c8537fadd04
Author: Markus Thömmes 
AuthorDate: Wed Feb 28 13:45:05 2018 +0100

Soften DockerExampleContainerTests. (#3368)

This test fails in different environments with different exceptions. This 
commit only aims to resolve the immediate issue.
---
 .../src/test/scala/actionContainers/DockerExampleContainerTests.scala | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git 
a/tests/src/test/scala/actionContainers/DockerExampleContainerTests.scala 
b/tests/src/test/scala/actionContainers/DockerExampleContainerTests.scala
index c5bfdba..a28b611 100644
--- a/tests/src/test/scala/actionContainers/DockerExampleContainerTests.scala
+++ b/tests/src/test/scala/actionContainers/DockerExampleContainerTests.scala
@@ -137,10 +137,12 @@ class DockerExampleContainerTests extends 
ActionProxyContainerTestUtils with Wsk
 
   it should "timeout bad proxy with exception" in {
 val (out, err) = withContainer("badproxy") { c =>
-  an[IllegalStateException] should be thrownBy {
+  val ex = the[Exception] thrownBy {
 val (code, out) = c.init(JsObject())
 println(code, out)
   }
+
+  ex should (be(a[TimeoutException]) or be(a[IllegalStateException]))
 }
 
 out shouldBe empty

-- 
To stop receiving notification emails like this one, please contact
cbic...@apache.org.


[incubator-openwhisk] branch master updated: Make action-time-limit's system bounds configurable. (#3379)

2018-03-04 Thread cbickel
This is an automated email from the ASF dual-hosted git repository.

cbickel pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-openwhisk.git


The following commit(s) were added to refs/heads/master by this push:
 new a6dfcc2  Make action-time-limit's system bounds configurable. (#3379)
a6dfcc2 is described below

commit a6dfcc2a0d3f06bde7483544def8efd904cf213e
Author: Markus Thömmes 
AuthorDate: Mon Mar 5 08:14:48 2018 +0100

Make action-time-limit's system bounds configurable. (#3379)
---
 ansible/roles/controller/tasks/deploy.yml|  4 
 ansible/roles/invoker/tasks/deploy.yml   |  3 +++
 common/scala/src/main/resources/application.conf |  7 +++
 .../src/main/scala/whisk/core/WhiskConfig.scala  |  1 +
 .../src/main/scala/whisk/core/entity/TimeLimit.scala | 20 +++-
 5 files changed, 26 insertions(+), 9 deletions(-)

diff --git a/ansible/roles/controller/tasks/deploy.yml 
b/ansible/roles/controller/tasks/deploy.yml
index e466d8b..636937c 100644
--- a/ansible/roles/controller/tasks/deploy.yml
+++ b/ansible/roles/controller/tasks/deploy.yml
@@ -140,6 +140,10 @@
   "CONFIG_whisk_memory_max": "{{ limit_action_memory_max | default() }}"
   "CONFIG_whisk_memory_std": "{{ limit_action_memory_std | default() }}"
 
+  "CONFIG_whisk_timeLimit_min": "{{ limit_action_time_min | default() }}"
+  "CONFIG_whisk_timeLimit_max": "{{ limit_action_time_max | default() }}"
+  "CONFIG_whisk_timeLimit_std": "{{ limit_action_time_std | default() }}"
+
   "CONFIG_whisk_activation_payload_max": "{{ limit_activation_payload | 
default() }}"
 
   "RUNTIMES_MANIFEST": "{{ runtimesManifest | to_json }}"
diff --git a/ansible/roles/invoker/tasks/deploy.yml 
b/ansible/roles/invoker/tasks/deploy.yml
index 8fc7a84..bf91a50 100644
--- a/ansible/roles/invoker/tasks/deploy.yml
+++ b/ansible/roles/invoker/tasks/deploy.yml
@@ -203,6 +203,9 @@
 -e CONFIG_whisk_memory_min='{{ limit_action_memory_min | default() }}'
 -e CONFIG_whisk_memory_max='{{ limit_action_memory_max | default() }}'
 -e CONFIG_whisk_memory_std='{{ limit_action_memory_std | default() }}'
+-e CONFIG_whisk_timeLimit_min='{{ limit_action_time_min | default() }}'
+-e CONFIG_whisk_timeLimit_max='{{ limit_action_time_max | default() }}'
+-e CONFIG_whisk_timeLimit_std='{{ limit_action_time_std | default() }}'
 -e CONFIG_whisk_activation_payload_max='{{ limit_activation_payload | 
default() }}'
 -v /sys/fs/cgroup:/sys/fs/cgroup
 -v /run/runc:/run/runc
diff --git a/common/scala/src/main/resources/application.conf 
b/common/scala/src/main/resources/application.conf
index 1b9b739..0c4f91a 100644
--- a/common/scala/src/main/resources/application.conf
+++ b/common/scala/src/main/resources/application.conf
@@ -125,6 +125,13 @@ whisk {
 }
 }
 
+# action timelimit configuration
+time-limit {
+min = 100 ms
+max = 5 m
+std = 1 m
+}
+
 # action memory configuration
 memory {
 min = 128 m
diff --git a/common/scala/src/main/scala/whisk/core/WhiskConfig.scala 
b/common/scala/src/main/scala/whisk/core/WhiskConfig.scala
index aaa6a89..b3e6826 100644
--- a/common/scala/src/main/scala/whisk/core/WhiskConfig.scala
+++ b/common/scala/src/main/scala/whisk/core/WhiskConfig.scala
@@ -242,6 +242,7 @@ object ConfigKeys {
   val kafkaTopics = s"$kafka.topics"
 
   val memory = "whisk.memory"
+  val timeLimit = "whisk.time-limit"
   val activation = "whisk.activation"
   val activationPayload = s"$activation.payload"
 
diff --git a/common/scala/src/main/scala/whisk/core/entity/TimeLimit.scala 
b/common/scala/src/main/scala/whisk/core/entity/TimeLimit.scala
index 3122e2e..4819404 100644
--- a/common/scala/src/main/scala/whisk/core/entity/TimeLimit.scala
+++ b/common/scala/src/main/scala/whisk/core/entity/TimeLimit.scala
@@ -17,19 +17,17 @@
 
 package whisk.core.entity
 
-import scala.concurrent.duration.Duration
-import scala.concurrent.duration.DurationInt
-import scala.concurrent.duration.FiniteDuration
-import scala.concurrent.duration.MILLISECONDS
-import scala.language.postfixOps
+import pureconfig._
+
+import scala.concurrent.duration._
 import scala.util.Failure
 import scala.util.Success
 import scala.util.Try
-
 import spray.json.JsNumber
 import spray.json.JsValue
 import spray.json.RootJsonFormat
 import spray.json.deserializationError
+import whisk.core.ConfigKeys
 
 /**
  * TimeLimit encapsulates a duration for an action. The duration must be 
within a
@@ -46,10 +44,14 @@ protected[entity] class TimeLimit private (val duration: 
FiniteDuration) extends
   override def

[incubator-openwhisk] branch master updated: Change all empty collection creations to an explicit call to `empty`. (#3359)

2018-03-04 Thread cbickel
This is an automated email from the ASF dual-hosted git repository.

cbickel pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-openwhisk.git


The following commit(s) were added to refs/heads/master by this push:
 new 59abfcc  Change all empty collection creations to an explicit call to 
`empty`. (#3359)
59abfcc is described below

commit 59abfccf91b58ee39f184030374203f1bf372f2d
Author: Markus Thömmes 
AuthorDate: Mon Mar 5 08:15:31 2018 +0100

Change all empty collection creations to an explicit call to `empty`. 
(#3359)

Collection.empty is optimized for fewer allocations and less work in general, 
and is clearer to read.
---
 common/scala/src/main/scala/whisk/common/Config.scala | 2 +-
 common/scala/src/main/scala/whisk/core/WhiskConfig.scala  | 2 +-
 .../src/main/scala/whisk/core/containerpool/ContainerFactory.scala| 4 ++--
 .../scala/whisk/core/containerpool/logging/LogDriverLogStore.scala| 2 +-
 common/scala/src/main/scala/whisk/core/entity/Parameter.scala | 2 +-
 common/scala/src/main/scala/whisk/core/entity/WhiskPackage.scala  | 4 ++--
 core/controller/src/main/scala/whisk/core/controller/WebActions.scala | 4 ++--
 .../src/main/scala/whisk/core/containerpool/docker/DockerClient.scala | 4 ++--
 .../main/scala/whisk/core/containerpool/docker/DockerContainer.scala  | 4 ++--
 9 files changed, 14 insertions(+), 14 deletions(-)

diff --git a/common/scala/src/main/scala/whisk/common/Config.scala 
b/common/scala/src/main/scala/whisk/common/Config.scala
index 128c224..8289f4a 100644
--- a/common/scala/src/main/scala/whisk/common/Config.scala
+++ b/common/scala/src/main/scala/whisk/common/Config.scala
@@ -42,7 +42,7 @@ import scala.util.Try
  * @param optionalProperties a Set of optional properties which may or not be 
defined.
  * @param env an optional environment to read from (defaults to sys.env).
  */
-class Config(requiredProperties: Map[String, String], optionalProperties: 
Set[String] = Set())(
+class Config(requiredProperties: Map[String, String], optionalProperties: 
Set[String] = Set.empty)(
   env: Map[String, String] = sys.env)(implicit logging: Logging) {
 
   private val settings = getProperties().toMap.filter {
diff --git a/common/scala/src/main/scala/whisk/core/WhiskConfig.scala 
b/common/scala/src/main/scala/whisk/core/WhiskConfig.scala
index b3e6826..1ff53fb 100644
--- a/common/scala/src/main/scala/whisk/core/WhiskConfig.scala
+++ b/common/scala/src/main/scala/whisk/core/WhiskConfig.scala
@@ -34,7 +34,7 @@ import whisk.common.{Config, Logging}
  * @param env an optional environment to initialize from.
  */
 class WhiskConfig(requiredProperties: Map[String, String],
-  optionalProperties: Set[String] = Set(),
+  optionalProperties: Set[String] = Set.empty,
   propertiesFile: File = null,
   env: Map[String, String] = sys.env)(implicit logging: 
Logging)
 extends Config(requiredProperties, optionalProperties)(env) {
diff --git 
a/common/scala/src/main/scala/whisk/core/containerpool/ContainerFactory.scala 
b/common/scala/src/main/scala/whisk/core/containerpool/ContainerFactory.scala
index fd19495..17860c0 100644
--- 
a/common/scala/src/main/scala/whisk/core/containerpool/ContainerFactory.scala
+++ 
b/common/scala/src/main/scala/whisk/core/containerpool/ContainerFactory.scala
@@ -28,8 +28,8 @@ import whisk.core.entity.InstanceId
 import whisk.spi.Spi
 
 case class ContainerArgsConfig(network: String,
-   dnsServers: Seq[String] = Seq(),
-   extraArgs: Map[String, Set[String]] = Map())
+   dnsServers: Seq[String] = Seq.empty,
+   extraArgs: Map[String, Set[String]] = Map.empty)
 
 /**
  * An abstraction for Container creation
diff --git 
a/common/scala/src/main/scala/whisk/core/containerpool/logging/LogDriverLogStore.scala
 
b/common/scala/src/main/scala/whisk/core/containerpool/logging/LogDriverLogStore.scala
index ea1576c..5320d4d 100644
--- 
a/common/scala/src/main/scala/whisk/core/containerpool/logging/LogDriverLogStore.scala
+++ 
b/common/scala/src/main/scala/whisk/core/containerpool/logging/LogDriverLogStore.scala
@@ -38,7 +38,7 @@ import scala.concurrent.Future
 class LogDriverLogStore(actorSystem: ActorSystem) extends LogStore {
 
   /** Indicate --log-driver and --log-opt flags via 
ContainerArgsConfig.extraArgs */
-  override def containerParameters = Map()
+  override def containerParameters = Map.empty
 
   def collectLogs(transid: TransactionId,
   user: Identity,
diff --git a/common/scala/src/main/scala/whisk/core/entity/Parameter.scala 
b/common/scala/src/main/scala/whisk/core/entity/Parameter.scala
index 18ddb6e..c27744e 100644
--- a/common/scala/src/main/scala/whisk/core/entity/Parameter.scala
+++ b/common/scala/src/main/scala/whisk/core/entity/Parameter.scala
@@ -161,7 +161,7

[incubator-openwhisk] branch master updated: Turn off Kafka auto-commits. (#3393)

2018-03-06 Thread cbickel
This is an automated email from the ASF dual-hosted git repository.

cbickel pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-openwhisk.git


The following commit(s) were added to refs/heads/master by this push:
 new 593ec67  Turn off Kafka auto-commits. (#3393)
593ec67 is described below

commit 593ec679310f698517a3fb9bf323c229a4f8acee
Author: Markus Thömmes 
AuthorDate: Tue Mar 6 11:34:19 2018 +0100

Turn off Kafka auto-commits. (#3393)

We commit manually everywhere in our code. Some tests also rely on the 
manual commits to be the only ones around. We should be able to turn this off 
safely. If not we have a bug.
---
 common/scala/src/main/resources/application.conf | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/common/scala/src/main/resources/application.conf 
b/common/scala/src/main/resources/application.conf
index 0c4f91a..b1d1163 100644
--- a/common/scala/src/main/resources/application.conf
+++ b/common/scala/src/main/resources/application.conf
@@ -66,8 +66,7 @@ whisk {
 consumer {
 session-timeout-ms = 3
 heartbeat-interval-ms = 1
-enable-auto-commit = true
-auto-commit-interval-ms = 1
+enable-auto-commit = false
 auto-offset-reset = earliest
 max-poll-interval = 36
 // This value controls the server-side wait time which affects 
polling latency.

-- 
To stop receiving notification emails like this one, please contact
cbic...@apache.org.


[incubator-openwhisk] branch master updated: Use ShardingContainerPoolBalancer as the default. (#3400)

2018-03-08 Thread cbickel
This is an automated email from the ASF dual-hosted git repository.

cbickel pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-openwhisk.git


The following commit(s) were added to refs/heads/master by this push:
 new b163f80  Use ShardingContainerPoolBalancer as the default. (#3400)
b163f80 is described below

commit b163f805c428af7c87d1ac8b78e0283619ce75ec
Author: Markus Thömmes 
AuthorDate: Thu Mar 8 10:48:13 2018 +0100

Use ShardingContainerPoolBalancer as the default. (#3400)
---
 common/scala/src/main/resources/reference.conf | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/common/scala/src/main/resources/reference.conf 
b/common/scala/src/main/resources/reference.conf
index 4530aef..9821b8f 100644
--- a/common/scala/src/main/resources/reference.conf
+++ b/common/scala/src/main/resources/reference.conf
@@ -3,5 +3,5 @@ whisk.spi{
   MessagingProvider = whisk.connector.kafka.KafkaMessagingProvider
   ContainerFactoryProvider = 
whisk.core.containerpool.docker.DockerContainerFactoryProvider
   LogStoreProvider = 
whisk.core.containerpool.logging.DockerToActivationLogStoreProvider
-  LoadBalancerProvider = whisk.core.loadBalancer.ContainerPoolBalancer
+  LoadBalancerProvider = whisk.core.loadBalancer.ShardingContainerPoolBalancer
 }

-- 
To stop receiving notification emails like this one, please contact
cbic...@apache.org.


[incubator-openwhisk] branch master updated: Cleanup script for unused entities in the whisks database. (#3382)

2018-03-09 Thread cbickel
This is an automated email from the ASF dual-hosted git repository.

cbickel pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-openwhisk.git


The following commit(s) were added to refs/heads/master by this push:
 new fbc0091  Cleanup script for unused entities in the whisks database. 
(#3382)
fbc0091 is described below

commit fbc009170295a23473b1ee390985d4b4dda4aad0
Author: Steffen Rost 
AuthorDate: Fri Mar 9 16:08:11 2018 +0100

Cleanup script for unused entities in the whisks database. (#3382)

Python script to clean up the whisks db if a namespace does not exist anymore

Co-authored-by: Christian Bickel 
---
 .../database/test/CleanUpWhisksDbSkriptTests.scala | 279 +
 tools/db/cleanUpWhisks.py  | 150 +++
 2 files changed, 429 insertions(+)

diff --git 
a/tests/src/test/scala/whisk/core/database/test/CleanUpWhisksDbSkriptTests.scala
 
b/tests/src/test/scala/whisk/core/database/test/CleanUpWhisksDbSkriptTests.scala
new file mode 100644
index 000..1df1a48
--- /dev/null
+++ 
b/tests/src/test/scala/whisk/core/database/test/CleanUpWhisksDbSkriptTests.scala
@@ -0,0 +1,279 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package whisk.core.database.test
+
+import java.io.File
+import java.time.Instant
+import java.time.temporal.ChronoUnit
+
+import common.{StreamLogging, TestUtils, WhiskProperties, WskActorSystem}
+import org.junit.runner.RunWith
+import org.scalatest.junit.JUnitRunner
+import org.scalatest.{FlatSpec, Matchers}
+import pureconfig.loadConfigOrThrow
+import spray.json._
+import spray.json.DefaultJsonProtocol._
+import whisk.core.database.CouchDbConfig
+//import whisk.core.{ConfigKeys, WhiskConfig}
+import whisk.core.ConfigKeys
+import whisk.core.entity._
+
+@RunWith(classOf[JUnitRunner])
+class CleanUpWhisksDbSkriptTests
+extends FlatSpec
+with Matchers
+with DatabaseScriptTestUtils
+with WskActorSystem
+with StreamLogging {
+
+  val cleanupScript = 
WhiskProperties.getFileRelativeToWhiskHome("tools/db/cleanUpWhisks.py").getAbsolutePath
+  val dbConfig = loadConfigOrThrow[CouchDbConfig](ConfigKeys.couchdb)
+  val authDBName = dbConfig.databaseFor[WhiskAuth]
+
+  def runScript(dbUrl: String, whisksDbName: String, subjectsDbName: String) = 
{
+println(s"Running script: $dbUrl, $whisksDbName, $subjectsDbName")
+
+val cmd =
+  Seq(
+python,
+cleanupScript,
+"--dbUrl",
+dbUrl,
+"--dbNameWhisks",
+whisksDbName,
+"--dbNameSubjects",
+subjectsDbName,
+"--days",
+"1",
+"--docsPerRequest",
+"1")
+
+val rr = TestUtils.runCmd(0, new File("."), cmd: _*)
+
+val Seq(marked, deleted, skipped, kept) =
+  Seq("marking: ", "deleting: ", "skipping: ", "keeping: ").map { 
linePrefix =>
+rr.stdout.lines.collect {
+  case line if line.startsWith(linePrefix) => line.replace(linePrefix, 
"")
+}.toList
+  }
+
+println(s"marked:  $marked")
+println(s"deleted: $deleted")
+println(s"skipped: $skipped")
+println(s"kept:$kept")
+
+(marked, deleted, skipped, kept)
+  }
+
+  behavior of "Cleanup whisksDb script"
+
+  it should "mark documents for deletion if namespace does not exist" in {
+// Create whisks db
+val dbName = dbPrefix + "cleanup_whisks_test_mark_for_deletion"
+val client = createDatabase(dbName, None)
+
+// Create document/action with random namespace
+val documents = Map(
+  "whisksCleanTests/utils/actionName1" -> JsObject("namespace" -> 
JsString("whisksCleanTests/utils")),
+  "whisksCleanTests/utils/actionName2" -> JsObject("namespace" -> 
JsString("whisksCleanTests")),
+  "whisksCleanTests/actionName3" -> JsObject("namespace" -> 
JsString("whisksCleanTests")))

[incubator-openwhisk] branch master updated: Enable ssl on the path between edge and controllers. (#3077)

2018-03-12 Thread cbickel
This is an automated email from the ASF dual-hosted git repository.

cbickel pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-openwhisk.git


The following commit(s) were added to refs/heads/master by this push:
 new a8476ab  Enable ssl on the path between edge and controllers. (#3077)
a8476ab is described below

commit a8476ab970b4f8804d0d26fa319fe4aaa7c9ab04
Author: Vadim Raskin 
AuthorDate: Mon Mar 12 10:15:39 2018 +0100

Enable ssl on the path between edge and controllers. (#3077)
---
 .gitignore |  4 ++
 ansible/group_vars/all | 19 ++
 ansible/roles/controller/tasks/deploy.yml  | 34 +-
 ansible/roles/nginx/tasks/deploy.yml   |  9 +++
 ansible/roles/nginx/templates/nginx.conf.j2| 12 +++-
 ansible/setup.yml  | 12 
 ansible/templates/whisk.properties.j2  |  1 +
 .../scala/src/main/scala/whisk/common/Https.scala  | 75 ++
 .../main/scala/whisk/http/BasicHttpService.scala   | 35 ++
 .../controller/src/main/resources/application.conf |  3 +
 .../scala/whisk/core/controller/Controller.scala   |  8 ++-
 .../scala/whisk/core/controller/Triggers.scala | 32 -
 .../main/scala/whisk/core/invoker/Invoker.scala|  4 +-
 tests/src/test/resources/application.conf.j2   | 13 
 tests/src/test/scala/common/rest/WskRest.scala | 62 --
 .../src/test/scala/ha/CacheInvalidationTests.scala | 34 ++
 tests/src/test/scala/ha/ShootComponentsTests.scala | 13 +++-
 tests/src/test/scala/services/HeadersTests.scala   | 14 ++--
 tools/travis/setup.sh  |  2 +-
 19 files changed, 338 insertions(+), 48 deletions(-)

diff --git a/.gitignore b/.gitignore
index 710e6f5..6a97330 100644
--- a/.gitignore
+++ b/.gitignore
@@ -60,9 +60,13 @@ ansible/db_local.ini*
 ansible/tmp/*
 ansible/roles/nginx/files/openwhisk-client*
 ansible/roles/nginx/files/*.csr
+ansible/roles/nginx/files/*.p12
 ansible/roles/nginx/files/*cert.pem
 ansible/roles/nginx/files/*p12
 ansible/roles/kafka/files/*
+ansible/roles/controller/files/*.pem
+ansible/roles/controller/files/*.key
+ansible/roles/controller/files/*.p12
 
 # .zip files must be explicited whitelisted
 *.zip
diff --git a/ansible/group_vars/all b/ansible/group_vars/all
index 3105c5b..f0dca5f 100644
--- a/ansible/group_vars/all
+++ b/ansible/group_vars/all
@@ -66,6 +66,25 @@ controller:
   loadbalancer:
 spi: "{{ controller_loadbalancer_spi | default('') }}"
   loglevel: "{{ controller_loglevel | default(whisk_loglevel) | 
default('INFO') }}"
+  protocol: "{{ controllerProtocolForSetup }}"
+  ssl:
+cn: openwhisk-controllers
+cert: "{{ controller_ca_cert | 
default('controller-openwhisk-server-cert.pem') }}"
+key: "{{ controller_key | default('controller-openwhisk-server-key.pem') 
}}"
+clientAuth: "{{ controller_client_auth | default('true') }}"
+storeFlavor: PKCS12
+keystore:
+  password: "{{ controllerKeystorePassword }}"
+  path: "/conf/{{ controllerKeystoreName }}"
+# keystore and truststore are the same as long as controller and nginx share 
the certificate
+truststore:
+  password: "{{ controllerKeystorePassword }}"
+  path: "/conf/{{ controllerKeystoreName }}"
+# move controller protocol outside to not evaluate controller variables during 
execution of setup.yml
+controllerProtocolForSetup: "{{ controller_protocol | default('https') }}"
+controllerKeystoreName: "{{ controllerKeyPrefix }}openwhisk-keystore.p12"
+controllerKeyPrefix: "controller-"
+controllerKeystorePassword: openwhisk
 
 jmx:
   basePortController: 15000
diff --git a/ansible/roles/controller/tasks/deploy.yml 
b/ansible/roles/controller/tasks/deploy.yml
index 9dc2de7..68d7efa 100644
--- a/ansible/roles/controller/tasks/deploy.yml
+++ b/ansible/roles/controller/tasks/deploy.yml
@@ -51,6 +51,25 @@
 src: "{{ openwhisk_home }}/ansible/roles/kafka/files/{{ 
kafka.ssl.keystore.name }}"
 dest: "{{ controller.confdir }}/controller{{ 
groups['controllers'].index(inventory_hostname) }}"
 
+- name: copy nginx certificate keystore
+  when: controller.protocol == 'https'
+  copy:
+src: files/{{ controllerKeystoreName }}
+mode: 0666
+dest: "{{ controller.confdir }}/controller{{ 
groups['controllers'].index(inventory_hostname) }}"
+  become: "{{ controller.dir.become }}"
+
+- name: copy certificates
+  when: controller.protocol == 'https'
+  copy:
+src: "{{ openwhisk_home }}/ansible/roles/controller/files/{{ item }}"
+mode: 0666
+dest: "{{ controller.confdir }}/controller{{ 
groups[

[incubator-openwhisk] branch master updated: Send active-ack in any case of a parseable message. (#3424)

2018-03-13 Thread cbickel
This is an automated email from the ASF dual-hosted git repository.

cbickel pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-openwhisk.git


The following commit(s) were added to refs/heads/master by this push:
 new c69b6f5  Send active-ack in any case of a parseable message. (#3424)
c69b6f5 is described below

commit c69b6f5488122705ef75c42e0a0c82ab65c7075c
Author: Markus Thömmes 
AuthorDate: Tue Mar 13 13:20:50 2018 +0100

Send active-ack in any case of a parseable message. (#3424)
---
 .../src/main/scala/whisk/http/ErrorResponse.scala  |   2 +
 .../scala/whisk/core/invoker/InvokerReactive.scala | 193 +++--
 .../whisk/core/invoker/NamespaceBlacklist.scala|   3 -
 3 files changed, 101 insertions(+), 97 deletions(-)

diff --git a/common/scala/src/main/scala/whisk/http/ErrorResponse.scala 
b/common/scala/src/main/scala/whisk/http/ErrorResponse.scala
index 0f466d3..97d2008 100644
--- a/common/scala/src/main/scala/whisk/http/ErrorResponse.scala
+++ b/common/scala/src/main/scala/whisk/http/ErrorResponse.scala
@@ -205,6 +205,8 @@ object Messages {
 }
   }
 
+  val namespacesBlacklisted = "The action was not invoked due to a blacklisted 
namespace."
+
   val actionRemovedWhileInvoking = "Action could not be found or may have been 
deleted."
   val actionMismatchWhileInvoking = "Action version is not compatible and 
cannot be invoked."
   val actionFetchErrorWhileInvoking = "Action could not be fetched."
diff --git 
a/core/invoker/src/main/scala/whisk/core/invoker/InvokerReactive.scala 
b/core/invoker/src/main/scala/whisk/core/invoker/InvokerReactive.scala
index afe0c89..0729103 100644
--- a/core/invoker/src/main/scala/whisk/core/invoker/InvokerReactive.scala
+++ b/core/invoker/src/main/scala/whisk/core/invoker/InvokerReactive.scala
@@ -37,7 +37,7 @@ import whisk.core.entity.size._
 import whisk.http.Messages
 import whisk.spi.SpiLoader
 
-import scala.concurrent.Future
+import scala.concurrent.{ExecutionContext, Future}
 import scala.concurrent.duration._
 import scala.util.{Failure, Success}
 
@@ -46,8 +46,8 @@ class InvokerReactive(config: WhiskConfig, instance: 
InstanceId, producer: Messa
   logging: Logging) {
 
   implicit val materializer: ActorMaterializer = ActorMaterializer()
-  implicit val ec = actorSystem.dispatcher
-  implicit val cfg = config
+  implicit val ec: ExecutionContext = actorSystem.dispatcher
+  implicit val cfg: WhiskConfig = config
 
   private val logsProvider = 
SpiLoader.get[LogStoreProvider].logStore(actorSystem)
   logging.info(this, s"LogStoreProvider: ${logsProvider.getClass}")
@@ -59,7 +59,7 @@ class InvokerReactive(config: WhiskConfig, instance: 
InstanceId, producer: Messa
* task or actor because further operation does not make sense if something
* goes wrong here. Initialization will throw an exception upon failure.
*/
-  val containerFactory =
+  private val containerFactory =
 SpiLoader
   .get[ContainerFactoryProvider]
   .getContainerFactory(
@@ -90,26 +90,26 @@ class InvokerReactive(config: WhiskConfig, instance: 
InstanceId, producer: Messa
   }
 
   /** Initialize message consumers */
-  val topic = s"invoker${instance.toInt}"
-  val maximumContainers = config.invokerNumCore.toInt * 
config.invokerCoreShare.toInt
-  val msgProvider = SpiLoader.get[MessagingProvider]
-  val consumer = msgProvider.getConsumer(
+  private val topic = s"invoker${instance.toInt}"
+  private val maximumContainers = config.invokerNumCore.toInt * 
config.invokerCoreShare.toInt
+  private val msgProvider = SpiLoader.get[MessagingProvider]
+  private val consumer = msgProvider.getConsumer(
 config,
 topic,
 topic,
 maximumContainers,
 maxPollInterval = TimeLimit.MAX_DURATION + 1.minute)
 
-  val activationFeed = actorSystem.actorOf(Props {
+  private val activationFeed = actorSystem.actorOf(Props {
 new MessageFeed("activation", logging, consumer, maximumContainers, 
500.milliseconds, processActivationMessage)
   })
 
   /** Sends an active-ack. */
-  val ack = (tid: TransactionId,
- activationResult: WhiskActivation,
- blockingInvoke: Boolean,
- controllerInstance: InstanceId) => {
-implicit val transid = tid
+  private val ack = (tid: TransactionId,
+ activationResult: WhiskActivation,
+ blockingInvoke: Boolean,
+ controllerInstance: InstanceId) => {
+implicit val transid: TransactionId = tid
 
 def send(res: Either[ActivationId, WhiskActivation], recovery: Boolean = 
false) = {
   val msg = CompletionMessage(transid, res, instance)
@@ -129,8 +129,8 @@ class InvokerReactive(config: WhiskConfig, instance: 
InstanceId, producer: Messa
   }
 
   /** Stores an activation in the database. */
-  val store = (tid: TransactionId, activation: WhiskActivation) => {
-implicit 

[incubator-openwhisk] branch master updated: Fix negative values and blocking usage in Kafka lag monitoring. (#3434)

2018-03-14 Thread cbickel
This is an automated email from the ASF dual-hosted git repository.

cbickel pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-openwhisk.git


The following commit(s) were added to refs/heads/master by this push:
 new 9c88922  Fix negative values and blocking usage in Kafka lag 
monitoring. (#3434)
9c88922 is described below

commit 9c889222f9706f7a7b40e3ccbca78aa03f2d6d2c
Author: Markus Thömmes 
AuthorDate: Wed Mar 14 13:05:54 2018 +0100

Fix negative values and blocking usage in Kafka lag monitoring. (#3434)

1. `endOffsets` might be eventually consistent with the locally stored offset 
value. Negative values need to be normalized to 0.
2. `endOffsets` can "block indefinitely" per documentation, so we need to 
make sure to protect the execution context against thread starvation.
---
 .../connector/kafka/KafkaConsumerConnector.scala   | 25 +-
 1 file changed, 15 insertions(+), 10 deletions(-)

diff --git 
a/common/scala/src/main/scala/whisk/connector/kafka/KafkaConsumerConnector.scala
 
b/common/scala/src/main/scala/whisk/connector/kafka/KafkaConsumerConnector.scala
index fc0954e..20d5635 100644
--- 
a/common/scala/src/main/scala/whisk/connector/kafka/KafkaConsumerConnector.scala
+++ 
b/common/scala/src/main/scala/whisk/connector/kafka/KafkaConsumerConnector.scala
@@ -25,14 +25,14 @@ import org.apache.kafka.common.TopicPartition
 import org.apache.kafka.common.errors.{RetriableException, WakeupException}
 import org.apache.kafka.common.serialization.ByteArrayDeserializer
 import pureconfig.loadConfigOrThrow
-import whisk.common.{Logging, LoggingMarkers, MetricEmitter}
+import whisk.common.{Logging, LoggingMarkers, MetricEmitter, Scheduler}
 import whisk.core.ConfigKeys
 import whisk.core.connector.MessageConsumer
 
 import scala.collection.JavaConversions.{iterableAsScalaIterable, 
seqAsJavaList}
-import scala.concurrent.duration._
-import scala.concurrent.{ExecutionContext, Future}
 import scala.collection.JavaConverters._
+import scala.concurrent.duration._
+import scala.concurrent.{blocking, ExecutionContext, Future}
 
 case class KafkaConsumerConfig(sessionTimeoutMs: Long, metricFlushIntervalS: 
Int)
 
@@ -147,13 +147,18 @@ class KafkaConsumerConnector(
 
   @volatile private var consumer = getConsumer(getProps, Some(List(topic)))
 
-  //  Read current lag of the consumed topic, e.g. invoker queue
-  //  Since we use only one partition in kafka, it is defined 0
-  actorSystem.scheduler.schedule(10.second, cfg.metricFlushIntervalS.second) {
-val topicAndPartition = Set(new TopicPartition(topic, 0))
-consumer.endOffsets(topicAndPartition.asJava).asScala.find(_._1.topic() == 
topic).map(_._2).foreach { endOffset =>
-  val queueSize = endOffset - offset
-  MetricEmitter.emitHistogramMetric(LoggingMarkers.KAFKA_QUEUE(topic), 
queueSize)
+  // Read current lag of the consumed topic, e.g. invoker queue
+  // Since we use only one partition in kafka, it is defined 0
+  Scheduler.scheduleWaitAtMost(cfg.metricFlushIntervalS.seconds, 10.seconds, 
"kafka-lag-monitor") { () =>
+Future {
+  blocking {
+val topicAndPartition = new TopicPartition(topic, 0)
+
consumer.endOffsets(Set(topicAndPartition).asJava).asScala.get(topicAndPartition).foreach
 { endOffset =>
+  // endOffset could lag behind the offset reported by the consumer 
internally resulting in negative numbers
+  val queueSize = (endOffset - offset).max(0)
+  MetricEmitter.emitHistogramMetric(LoggingMarkers.KAFKA_QUEUE(topic), 
queueSize)
+}
+  }
 }
   }
 }

-- 
To stop receiving notification emails like this one, please contact
cbic...@apache.org.


[incubator-openwhisk] branch master updated: Move cleanupWhisksDb script out of core repo. (#3436)

2018-03-15 Thread cbickel
This is an automated email from the ASF dual-hosted git repository.

cbickel pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-openwhisk.git


The following commit(s) were added to refs/heads/master by this push:
 new de7f2b6  Move cleanupWhisksDb script out of core repo. (#3436)
de7f2b6 is described below

commit de7f2b686e34411e3eae5fb36b8099a605e64e3d
Author: Steffen Rost 
AuthorDate: Thu Mar 15 13:01:56 2018 +0100

Move cleanupWhisksDb script out of core repo. (#3436)
---
 .../database/test/CleanUpWhisksDbSkriptTests.scala | 279 -
 tools/db/cleanUpWhisks.py  | 150 ---
 2 files changed, 429 deletions(-)

diff --git 
a/tests/src/test/scala/whisk/core/database/test/CleanUpWhisksDbSkriptTests.scala
 
b/tests/src/test/scala/whisk/core/database/test/CleanUpWhisksDbSkriptTests.scala
deleted file mode 100644
index 1df1a48..000
--- 
a/tests/src/test/scala/whisk/core/database/test/CleanUpWhisksDbSkriptTests.scala
+++ /dev/null
@@ -1,279 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package whisk.core.database.test
-
-import java.io.File
-import java.time.Instant
-import java.time.temporal.ChronoUnit
-
-import common.{StreamLogging, TestUtils, WhiskProperties, WskActorSystem}
-import org.junit.runner.RunWith
-import org.scalatest.junit.JUnitRunner
-import org.scalatest.{FlatSpec, Matchers}
-import pureconfig.loadConfigOrThrow
-import spray.json._
-import spray.json.DefaultJsonProtocol._
-import whisk.core.database.CouchDbConfig
-//import whisk.core.{ConfigKeys, WhiskConfig}
-import whisk.core.ConfigKeys
-import whisk.core.entity._
-
-@RunWith(classOf[JUnitRunner])
-class CleanUpWhisksDbSkriptTests
-extends FlatSpec
-with Matchers
-with DatabaseScriptTestUtils
-with WskActorSystem
-with StreamLogging {
-
-  val cleanupScript = 
WhiskProperties.getFileRelativeToWhiskHome("tools/db/cleanUpWhisks.py").getAbsolutePath
-  val dbConfig = loadConfigOrThrow[CouchDbConfig](ConfigKeys.couchdb)
-  val authDBName = dbConfig.databaseFor[WhiskAuth]
-
-  def runScript(dbUrl: String, whisksDbName: String, subjectsDbName: String) = 
{
-println(s"Running script: $dbUrl, $whisksDbName, $subjectsDbName")
-
-val cmd =
-  Seq(
-python,
-cleanupScript,
-"--dbUrl",
-dbUrl,
-"--dbNameWhisks",
-whisksDbName,
-"--dbNameSubjects",
-subjectsDbName,
-"--days",
-"1",
-"--docsPerRequest",
-"1")
-
-val rr = TestUtils.runCmd(0, new File("."), cmd: _*)
-
-val Seq(marked, deleted, skipped, kept) =
-  Seq("marking: ", "deleting: ", "skipping: ", "keeping: ").map { 
linePrefix =>
-rr.stdout.lines.collect {
-  case line if line.startsWith(linePrefix) => line.replace(linePrefix, 
"")
-}.toList
-  }
-
-println(s"marked:  $marked")
-println(s"deleted: $deleted")
-println(s"skipped: $skipped")
-println(s"kept:$kept")
-
-(marked, deleted, skipped, kept)
-  }
-
-  behavior of "Cleanup whisksDb script"
-
-  it should "mark documents for deletion if namespace does not exist" in {
-// Create whisks db
-val dbName = dbPrefix + "cleanup_whisks_test_mark_for_deletion"
-val client = createDatabase(dbName, None)
-
-// Create document/action with random namespace
-val documents = Map(
-  "whisksCleanTests/utils/actionName1" -> JsObject("namespace" -> 
JsString("whisksCleanTests/utils")),
-  "whisksCleanTests/utils/actionName2" -> JsObject("namespace" -> 
JsString("whisksCleanTests")),
-  "whisksCleanTests/actionName3" -> JsObject("namespace" -> 
JsString("whisksCleanTests")))
-
-documents.foreach {
-  case (id, document) =>
-client.putDoc(id, document).futureValue
-}
-
-// execute script
-  

[incubator-openwhisk] branch master updated: Retry in feed tests to avoid race condition. (#3480)

2018-03-23 Thread cbickel
This is an automated email from the ASF dual-hosted git repository.

cbickel pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-openwhisk.git


The following commit(s) were added to refs/heads/master by this push:
 new 4c84f1a  Retry in feed tests to avoid race condition. (#3480)
4c84f1a is described below

commit 4c84f1a629601ca818564f086866708a8ca06140
Author: Vadim Raskin 
AuthorDate: Fri Mar 23 14:57:39 2018 +0100

Retry in feed tests to avoid race condition. (#3480)
---
 tests/src/test/scala/whisk/core/cli/test/WskEntitlementTests.scala | 5 -
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/tests/src/test/scala/whisk/core/cli/test/WskEntitlementTests.scala 
b/tests/src/test/scala/whisk/core/cli/test/WskEntitlementTests.scala
index 99e839f..0a6174a 100644
--- a/tests/src/test/scala/whisk/core/cli/test/WskEntitlementTests.scala
+++ b/tests/src/test/scala/whisk/core/cli/test/WskEntitlementTests.scala
@@ -31,6 +31,8 @@ import spray.json._
 import spray.json.DefaultJsonProtocol._
 import whisk.core.entity.Subject
 import whisk.core.entity.WhiskPackage
+import whisk.utils.retry
+import scala.concurrent.duration._
 
 @RunWith(classOf[JUnitRunner])
 abstract class WskEntitlementTests extends TestHelpers with WskTestHelpers 
with BeforeAndAfterAll {
@@ -366,7 +368,8 @@ abstract class WskEntitlementTests extends TestHelpers with 
WskTestHelpers with
 assetHelper.withCleaner(wsk.trigger, "badfeed", confirmDelete = false) 
{ (trigger, name) =>
   trigger.create(name, feed = Some(fullyQualifiedFeedName), 
expectedExitCode = timeoutCode)(wp)
 }
-wsk.trigger.get("badfeed", expectedExitCode = notFoundCode)(wp)
+// with several active controllers race condition with cache 
invalidation might occur, thus retry
+retry(wsk.trigger.get("badfeed", expectedExitCode = notFoundCode)(wp), 
10, Some(500.milliseconds))
   }
   }
 

-- 
To stop receiving notification emails like this one, please contact
cbic...@apache.org.


[incubator-openwhisk] branch master updated: Ansible image name overrides and bump NGinx to 1.12. (#3363)

2018-04-03 Thread cbickel
This is an automated email from the ASF dual-hosted git repository.

cbickel pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-openwhisk.git


The following commit(s) were added to refs/heads/master by this push:
 new 928bed4  Ansible image name overrides and bump NGinx to 1.12. (#3363)
928bed4 is described below

commit 928bed4798d301cdeba60eca788dca66c4e49592
Author: Jonathan Springer 
AuthorDate: Tue Apr 3 08:54:25 2018 -0400

Ansible image name overrides and bump NGinx to 1.12. (#3363)

This is an incremental implementation of #3133 to reduce risk and 
complexity as we implement multi-architecture OpenWhisk. This PR includes these 
changes:

Add the ability to override image names to the ansible playbooks -- in 
support of overrides of images for multi-platform deployments.
Bump Nginx version to 1.12 -- latest stable version, available as a 
multi-platform manifest from DockerHub (current Nginx 1.11 is not).
Move responsibility for docker pull to the docker module, reducing ansible 
steps and complexity -- improved maintainability.
---
 ansible/group_vars/all|  2 +-
 ansible/roles/apigateway/tasks/deploy.yml | 12 
 ansible/roles/couchdb/tasks/deploy.yml| 14 +++---
 ansible/roles/kafka/tasks/deploy.yml  | 14 +++---
 4 files changed, 19 insertions(+), 23 deletions(-)

diff --git a/ansible/group_vars/all b/ansible/group_vars/all
index cd4eeba..936a704 100644
--- a/ansible/group_vars/all
+++ b/ansible/group_vars/all
@@ -170,7 +170,7 @@ nginx:
   confdir: "{{ config_root_dir }}/nginx"
   dir:
 become: "{{ nginx_dir_become | default(false) }}"
-  version: "{{ nginx_version | default(1.11) }}"
+  version: "{{ nginx_version | default('1.12') }}"
   port:
 http: 80
 api: 443
diff --git a/ansible/roles/apigateway/tasks/deploy.yml 
b/ansible/roles/apigateway/tasks/deploy.yml
index cc77720..58bcb12 100644
--- a/ansible/roles/apigateway/tasks/deploy.yml
+++ b/ansible/roles/apigateway/tasks/deploy.yml
@@ -1,17 +1,10 @@
 ---
 # This role will install apigateway
 
-- name: "pull the openwhisk/apigateway:{{ apigateway.version }} image"
-  shell: "docker pull openwhisk/apigateway:{{ apigateway.version }}"
-  when: apigateway_local_build is undefined
-  retries: "{{ docker.pull.retries }}"
-  delay: "{{ docker.pull.delay }}"
-
-#temporary pinned to image 0.7.0 until apigateway fixes regressions
 - name: (re)start apigateway
   docker_container:
 name: apigateway
-image: openwhisk/apigateway:{{ apigateway.version }}
+image: "{{ apigateway.docker_image | default('openwhisk/apigateway:' ~ 
apigateway.version) }}"
 state: started
 recreate: true
 restart_policy: "{{ docker.restart.policy }}"
@@ -24,6 +17,9 @@
 ports:
   - "{{ apigateway.port.mgmt }}:8080"
   - "{{ apigateway.port.api }}:9000"
+pull: "{{ apigateway_local_build is undefined }}"
+  retries: "{{ docker.pull.retries }}"
+  delay: "{{ docker.pull.delay }}"
 
 - name: wait until the API Gateway in this host is up and running
   uri:
diff --git a/ansible/roles/couchdb/tasks/deploy.yml 
b/ansible/roles/couchdb/tasks/deploy.yml
index 0dedb7c..52131a8 100644
--- a/ansible/roles/couchdb/tasks/deploy.yml
+++ b/ansible/roles/couchdb/tasks/deploy.yml
@@ -21,15 +21,12 @@
 volume_dir: "{{ instance.volume.fsmount | default( '/mnt/' + 
group_names|first, true ) }}:/usr/local/var/lib/couchdb"
   when: (block_device is defined) and (block_device in disk_status.stdout)
 
-- name: "pull the apache/couchdb:{{ couchdb.version }} image"
-  shell: "docker pull apache/couchdb:{{ couchdb.version }}"
-  retries: "{{ docker.pull.retries }}"
-  delay: "{{ docker.pull.delay }}"
-
-- name: (re)start CouchDB
+- name: "(re)start CouchDB from '{{ couchdb_image }} ' "
+  vars: 
+couchdb_image: "{{ couchdb.docker_image | default('apache/couchdb:' ~ 
couchdb.version ) }}" 
   docker_container:
 name: couchdb
-image: apache/couchdb:{{ couchdb.version }}
+image: "{{ couchdb_image }}"
 state: started
 recreate: true
 restart_policy: "{{ docker.restart.policy }}"
@@ -42,6 +39,9 @@
   COUCHDB_USER: "{{ db_username }}"
   COUCHDB_PASSWORD: "{{ db_password }}"
   NODENAME: "{{ ansible_host }}"
+pull: "{{ couchdb.pull_couchdb | default(true) }}"
+  retries: "{{ docker.pull.retries }}"
+  delay: "{{ docker.pull.delay }}"
 
 - name: wait until CouchDB in this host is up and running
   uri:
diff --git a/ansible/roles/kafka/tasks/deploy.yml 
b/ansible/roles/kafka/tasks/deploy.yml
index 4467fec..5d8cae6 100644
--- a/ansible/roles/kafka/task

[incubator-openwhisk] branch master updated: Remove deprecated loadbalancer. (#3413)

2018-04-03 Thread cbickel
This is an automated email from the ASF dual-hosted git repository.

cbickel pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-openwhisk.git


The following commit(s) were added to refs/heads/master by this push:
 new f647224  Remove deprecated loadbalancer. (#3413)
f647224 is described below

commit f64722498bc2f5eadc356b3ae26ebb85de1bcdbf
Author: Markus Thömmes 
AuthorDate: Tue Apr 3 16:57:24 2018 +0200

Remove deprecated loadbalancer. (#3413)
---
 .../core/loadBalancer/ContainerPoolBalancer.scala  | 356 -
 .../loadBalancer/DistributedLoadBalancerData.scala |  90 --
 .../whisk/core/loadBalancer/LoadBalancer.scala |   3 +
 .../whisk/core/loadBalancer/LoadBalancerData.scala |  86 -
 .../core/loadBalancer/LocalLoadBalancerData.scala  |  76 -
 .../ShardingContainerPoolBalancer.scala|  38 ++-
 .../core/loadBalancer/SharedDataService.scala  |  98 --
 .../test/ContainerPoolBalancerObjectTests.scala| 164 --
 .../loadBalancer/test/LoadBalancerDataTests.scala  | 295 -
 .../test/ShardingContainerPoolBalancerTests.scala  |  17 +
 .../loadBalancer/test/SharedDataServiceTests.scala |  81 -
 11 files changed, 53 insertions(+), 1251 deletions(-)

diff --git 
a/core/controller/src/main/scala/whisk/core/loadBalancer/ContainerPoolBalancer.scala
 
b/core/controller/src/main/scala/whisk/core/loadBalancer/ContainerPoolBalancer.scala
deleted file mode 100644
index 570281d..000
--- 
a/core/controller/src/main/scala/whisk/core/loadBalancer/ContainerPoolBalancer.scala
+++ /dev/null
@@ -1,356 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package whisk.core.loadBalancer
-
-import java.nio.charset.StandardCharsets
-import java.util.concurrent.ThreadLocalRandom
-
-import akka.actor.{ActorSystem, Props}
-import akka.cluster.Cluster
-import akka.pattern.ask
-import akka.stream.ActorMaterializer
-import akka.util.Timeout
-import org.apache.kafka.clients.producer.RecordMetadata
-import pureconfig._
-import whisk.common.{Logging, LoggingMarkers, MetricEmitter, TransactionId}
-import whisk.core.WhiskConfig._
-import whisk.core.connector._
-import whisk.core.entity._
-import whisk.core.{ConfigKeys, WhiskConfig}
-import whisk.spi.SpiLoader
-import akka.event.Logging.InfoLevel
-import pureconfig._
-
-import scala.annotation.tailrec
-import scala.concurrent.duration._
-import scala.concurrent.{ExecutionContext, Future, Promise}
-import scala.util.{Failure, Success}
-
-case class LoadbalancerConfig(blackboxFraction: Double, invokerBusyThreshold: 
Int)
-
-class ContainerPoolBalancer(config: WhiskConfig, controllerInstance: 
InstanceId)(implicit val actorSystem: ActorSystem,
-   
  logging: Logging,
-   
  materializer: ActorMaterializer)
-extends LoadBalancer {
-
-  private val lbConfig = 
loadConfigOrThrow[LoadbalancerConfig](ConfigKeys.loadbalancer)
-
-  /** The execution context for futures */
-  private implicit val executionContext: ExecutionContext = 
actorSystem.dispatcher
-
-  private val activeAckTimeoutGrace = 1.minute
-
-  /** How many invokers are dedicated to blackbox images.  We range bound to 
something sensical regardless of configuration. */
-  private val blackboxFraction: Double = Math.max(0.0, Math.min(1.0, 
lbConfig.blackboxFraction))
-  logging.info(this, s"blackboxFraction = 
$blackboxFraction")(TransactionId.loadbalancer)
-
-  /** Feature switch for shared load balancer data **/
-  private val loadBalancerData = {
-if (config.controllerLocalBookkeeping) {
-  new LocalLoadBalancerData()
-} else {
-
-  /** Specify how seed nodes are generated */
-  val seedNodesProvider = new 
StaticSeedNodesProvider(config.controllerSeedNodes, actorSystem.name)
-  Cluster(actorSystem).joinSeedNodes(seedNodesProvider.getSeedNodes())
-  new DistributedLoadBalancerData()
-}
-  }
-
-  override def publish(action: ExecutableWhiskActionMetaData, msg: 
ActivationMessage)(
-implicit transid: TransactionId): Fut

[incubator-openwhisk] branch master updated: Remove bookkeeping setting which is no longer applicable. (#3547)

2018-04-13 Thread cbickel
This is an automated email from the ASF dual-hosted git repository.

cbickel pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-openwhisk.git


The following commit(s) were added to refs/heads/master by this push:
 new d99a272  Remove bookkeeping setting which is no longer applicable. 
(#3547)
d99a272 is described below

commit d99a2729467e31157bc362154e6955e819d30c87
Author: Markus Thömmes 
AuthorDate: Fri Apr 13 13:58:22 2018 +0200

Remove bookkeeping setting which is no longer applicable. (#3547)

This is essentially a leftover of the removal of the old loadbalancer.
---
 ansible/roles/controller/tasks/deploy.yml | 1 -
 common/scala/src/main/scala/whisk/core/WhiskConfig.scala  | 2 --
 .../scala/whisk/core/loadBalancer/ShardingContainerPoolBalancer.scala | 4 +---
 docs/deploy.md| 4 ++--
 4 files changed, 3 insertions(+), 8 deletions(-)

diff --git a/ansible/roles/controller/tasks/deploy.yml 
b/ansible/roles/controller/tasks/deploy.yml
index b9e8c78..87167f5 100644
--- a/ansible/roles/controller/tasks/deploy.yml
+++ b/ansible/roles/controller/tasks/deploy.yml
@@ -171,7 +171,6 @@
   "CONFIG_whisk_runtimes_bypassPullForLocalImages": "{{ 
runtimes_bypass_pull_for_local_images | default() }}"
   "CONFIG_whisk_runtimes_localImagePrefix": "{{ 
runtimes_local_image_prefix | default() }}"
 
-  "CONTROLLER_LOCALBOOKKEEPING": "{{ controller.localBookkeeping }}"
   "AKKA_CLUSTER_SEED_NODES": "{{seed_nodes_list | join(' ') }}"
   "METRICS_KAMON": "{{ metrics.kamon.enabled }}"
   "METRICS_KAMON_TAGS": "{{ metrics.kamon.tags }}"
diff --git a/common/scala/src/main/scala/whisk/core/WhiskConfig.scala 
b/common/scala/src/main/scala/whisk/core/WhiskConfig.scala
index eb22e9d..c9683d6 100644
--- a/common/scala/src/main/scala/whisk/core/WhiskConfig.scala
+++ b/common/scala/src/main/scala/whisk/core/WhiskConfig.scala
@@ -84,7 +84,6 @@ class WhiskConfig(requiredProperties: Map[String, String],
   val actionInvokeSystemOverloadLimit = 
this(WhiskConfig.actionInvokeSystemOverloadLimit)
   val actionSequenceLimit = this(WhiskConfig.actionSequenceMaxLimit)
   val controllerSeedNodes = this(WhiskConfig.controllerSeedNodes)
-  val controllerLocalBookkeeping = 
getAsBoolean(WhiskConfig.controllerLocalBookkeeping, false)
 }
 
 object WhiskConfig {
@@ -203,7 +202,6 @@ object WhiskConfig {
   val actionInvokeSystemOverloadLimit = 
"limits.actions.invokes.concurrentInSystem"
   val triggerFirePerMinuteLimit = "limits.triggers.fires.perMinute"
   val controllerSeedNodes = "akka.cluster.seed.nodes"
-  val controllerLocalBookkeeping = "controller.localBookkeeping"
 }
 
 object ConfigKeys {
diff --git 
a/core/controller/src/main/scala/whisk/core/loadBalancer/ShardingContainerPoolBalancer.scala
 
b/core/controller/src/main/scala/whisk/core/loadBalancer/ShardingContainerPoolBalancer.scala
index 718079e..d10f03e 100644
--- 
a/core/controller/src/main/scala/whisk/core/loadBalancer/ShardingContainerPoolBalancer.scala
+++ 
b/core/controller/src/main/scala/whisk/core/loadBalancer/ShardingContainerPoolBalancer.scala
@@ -305,9 +305,7 @@ object ShardingContainerPoolBalancer extends 
LoadBalancerProvider {
 logging: Logging,
 materializer: ActorMaterializer): LoadBalancer = new 
ShardingContainerPoolBalancer(whiskConfig, instance)
 
-  def requiredProperties: Map[String, String] =
-kafkaHosts ++
-  Map(controllerLocalBookkeeping -> null, controllerSeedNodes -> null)
+  def requiredProperties: Map[String, String] = kafkaHosts ++ 
Map(controllerSeedNodes -> null)
 
   /** Generates a hash based on the string representation of namespace and 
action */
   def generateHash(namespace: EntityName, action: FullyQualifiedEntityName): 
Int = {
diff --git a/docs/deploy.md b/docs/deploy.md
index 49c2636..1ad8456 100644
--- a/docs/deploy.md
+++ b/docs/deploy.md
@@ -2,7 +2,7 @@ This page documents configuration options that should be 
considered when deployi
 
 # Controller Clustering
 
-The system can be configured to use Akka clustering to manage the distributed 
state of the Contoller's load balancing algorithm.  This imposes the following 
constriaints on a deployment
+The system can be configured to use Akka clustering to manage the distributed 
state of the Contoller's load balancing algorithm.  This imposes the following 
constraints on a deployment
 
 
 ## Controller nodes must have static IPs/Port combination.
@@ -19,7 +19,7 @@ https://doc.akka.io/docs/akka/2.5.4/scala/cluster-usage.html
 
 ## Shared state vs. Sharding
 
-OpenWhisk supports both a shared state and a sharding model. By default the 
shared-state loadbalancer is used. The sharding loadbalancer 

[incubator-openwhisk] branch master updated: Bump timeout for creating a container to 60 seconds in tests. (#3551)

2018-04-16 Thread cbickel
This is an automated email from the ASF dual-hosted git repository.

cbickel pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-openwhisk.git


The following commit(s) were added to refs/heads/master by this push:
 new fb60baf   Bump timeout for creating a container to 60 seconds in 
tests. (#3551)
fb60baf is described below

commit fb60baf0fb523f64409632a0460d50103b4f8111
Author: Markus Thömmes 
AuthorDate: Mon Apr 16 13:12:37 2018 +0200

 Bump timeout for creating a container to 60 seconds in tests. (#3551)

* Bump timeout for creating a container to 60 seconds in tests.

On very busy machines, creating a container can take quite long. For the 
sake of testing, it's of no use to fail the test on a relatively tight timeout. 
Bumping this to 60 seconds should not harm the test's significance.

* Style cleanup of that test file.
---
 .../scala/actionContainers/ActionContainer.scala   | 57 ++
 1 file changed, 26 insertions(+), 31 deletions(-)

diff --git a/tests/src/test/scala/actionContainers/ActionContainer.scala 
b/tests/src/test/scala/actionContainers/ActionContainer.scala
index ccd6748..cba29d9 100644
--- a/tests/src/test/scala/actionContainers/ActionContainer.scala
+++ b/tests/src/test/scala/actionContainers/ActionContainer.scala
@@ -27,7 +27,6 @@ import scala.concurrent.Future
 import scala.concurrent.blocking
 import scala.concurrent.duration.Duration
 import scala.concurrent.duration.DurationInt
-import scala.language.postfixOps
 import scala.sys.process.ProcessLogger
 import scala.sys.process.stringToProcess
 import scala.util.Random
@@ -53,19 +52,20 @@ trait ActionContainer {
 trait ActionProxyContainerTestUtils extends FlatSpec with Matchers {
   import ActionContainer.{filterSentinel, sentinel}
 
-  def initPayload(code: String, main: String = "main") = {
+  def initPayload(code: String, main: String = "main"): JsObject =
 JsObject(
   "value" -> JsObject(
 "code" -> { if (code != null) JsString(code) else JsNull },
 "main" -> JsString(main),
 "binary" -> JsBoolean(Exec.isBinaryCode(code
-  }
 
-  def runPayload(args: JsValue, other: Option[JsObject] = None) = {
+  def runPayload(args: JsValue, other: Option[JsObject] = None): JsObject =
 JsObject(Map("value" -> args) ++ (other map { _.fields } getOrElse Map()))
-  }
 
-  def checkStreams(out: String, err: String, additionalCheck: (String, String) 
=> Unit, sentinelCount: Int = 1) = {
+  def checkStreams(out: String,
+   err: String,
+   additionalCheck: (String, String) => Unit,
+   sentinelCount: Int = 1): Unit = {
 withClue("expected number of stdout sentinels") {
   sentinelCount shouldBe StringUtils.countMatches(out, sentinel)
 }
@@ -74,26 +74,24 @@ trait ActionProxyContainerTestUtils extends FlatSpec with 
Matchers {
 }
 
 val (o, e) = (filterSentinel(out), filterSentinel(err))
-o should not include (sentinel)
-e should not include (sentinel)
+o should not include sentinel
+e should not include sentinel
 additionalCheck(o, e)
   }
 }
 
 object ActionContainer {
   private lazy val dockerBin: String = {
-List("/usr/bin/docker", "/usr/local/bin/docker")
-  .find { bin =>
-new File(bin).isFile()
-  }
-  .getOrElse(???) // This fails if the docker binary couldn't be located.
+List("/usr/bin/docker", "/usr/local/bin/docker").find { bin =>
+  new File(bin).isFile
+}.get // This fails if the docker binary couldn't be located.
   }
 
   private lazy val dockerCmd: String = {
 val version = WhiskProperties.getProperty("whisk.version.name")
 // Check if we are running on docker-machine env.
 val hostStr = if (version.toLowerCase().contains("mac")) {
-  s" --host tcp://${WhiskProperties.getMainDockerEndpoint()} "
+  s" --host tcp://${WhiskProperties.getMainDockerEndpoint} "
 } else {
   " "
 }
@@ -109,7 +107,7 @@ object ActionContainer {
   val err = new ByteArrayOutputStream
   val outW = new PrintWriter(out)
   val errW = new PrintWriter(err)
-  val v = cmd ! (ProcessLogger(outW.println, errW.println))
+  val v = cmd ! ProcessLogger(o => outW.println(o), e => errW.println(e))
   outW.close()
   errW.close()
   (v, out.toString, err.toString)
@@ -125,21 +123,23 @@ object ActionContainer {
 
   // Filters out the sentinel markers inserted by the container (see relevant 
private code in Invoker.scala)
   val sentinel = "XXX_THE_END_OF_A_WHISK_ACTIVATION_XXX"
-  def filterSentinel(str: String) = str.replaceAll(sentinel, "").trim
+  def filterSentinel(str: String): String = str.replaceAll(sentinel, "&quo

  1   2   >