markusthoemmes commented on a change in pull request #3072: Enhance kafka
message provider
URL:
https://github.com/apache/incubator-openwhisk/pull/3072#discussion_r160149157
##########
File path:
common/scala/src/main/scala/whisk/connector/kafka/KafkaConsumerConnector.scala
##########
@@ -21,46 +21,106 @@ import java.util.Properties
import scala.collection.JavaConversions.iterableAsScalaIterable
import scala.collection.JavaConversions.seqAsJavaList
-import scala.concurrent.duration.Duration
-import scala.concurrent.duration.DurationInt
-import scala.concurrent.duration.FiniteDuration
-
+import scala.concurrent.duration._
+import scala.concurrent.Future
import org.apache.kafka.clients.consumer.ConsumerConfig
import org.apache.kafka.clients.consumer.KafkaConsumer
import org.apache.kafka.common.serialization.ByteArrayDeserializer
-
+import org.apache.kafka.common.errors.{InvalidMetadataException,
WakeupException}
+import akka.actor.ActorSystem
+import org.apache.kafka.common.KafkaException
import whisk.common.Logging
import whisk.core.connector.MessageConsumer
class KafkaConsumerConnector(kafkahost: String,
groupid: String,
topic: String,
+ actorSystem: ActorSystem,
override val maxPeek: Int = Int.MaxValue,
readeos: Boolean = true,
sessionTimeout: FiniteDuration = 30.seconds,
autoCommitInterval: FiniteDuration = 10.seconds,
maxPollInterval: FiniteDuration =
5.minutes)(implicit logging: Logging)
extends MessageConsumer {
+ implicit val ec = actorSystem.dispatcher
/**
   * Long poll for messages. The method returns once messages are available, but
   * no later than the given duration.
*
* @param duration the maximum duration for the long poll
*/
- override def peek(duration: Duration = 500.milliseconds) = {
- val records = consumer.poll(duration.toMillis)
- records map { r =>
- (r.topic, r.partition, r.offset, r.value)
+ override def peek(duration: Duration = 500.milliseconds,
+ retry: Int = 3): Iterable[(String, Int, Long,
Array[Byte])] = {
+    // Since the Kafka client can block indefinitely in poll(timeout), we
+    // should interrupt it with the wakeup() method.
+ val wakeUpTask = actorSystem.scheduler.scheduleOnce(sessionTimeout) {
+ consumer.wakeup()
}
+
+ try {
+ val records = consumer.poll(duration.toMillis)
+ records map { r =>
+ (r.topic, r.partition, r.offset, r.value)
+ }
+ } catch {
+
+      /**
+       * Kafka error-handling strategy
+       *
+       * 1. Errors we know of can be handled by a retry:
+       *    - WakeupException: an exception we raise intentionally to prevent
+       *      hanging in consumer.poll(timeout).
+       *    - InvalidMetadataException: an exception that may indicate the
+       *      client's metadata is out of date; this includes NetworkException,
+       *      KafkaStorageException, LeaderNotAvailableException,
+       *      NotLeaderForPartitionException and UnknownTopicOrPartitionException.
+       *
+       * 2. Errors we don't know of simply result in a restart of the
+       *    consumer/producer.
+       */
+ case e: KafkaException =>
+ e match {
+ case _: WakeupException =>
+ if (retry > 0) {
+ logging.error(this, s"Poll timeout occurred, remain $retry
retries to peek")
+ peek(duration, retry - 1)
+ } else {
+ recreateConsumer()
+ throw e
+ }
+ case _: InvalidMetadataException =>
+ if (retry > 0) {
+ logging.error(this, s"${e.getMessage}: remain $retry retries to
peek")
+ wakeUpTask.cancel()
Review comment:
Isn't this redundant as it's again handled in `finally`?
----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on GitHub and use the
URL above to go to the specific comment.
For queries about this service, please contact Infrastructure at:
[email protected]
With regards,
Apache Git Services