Ottomata has submitted this change and it was merged.

Change subject: Updating varnishkafka module with recent varnishkafka.conf 
changes
......................................................................


Updating varnishkafka module with recent varnishkafka.conf changes

Change-Id: Iadea0c92c0f9ee9fee88d9411a794739817db904
---
M manifests/defaults.pp
M manifests/init.pp
M templates/varnishkafka.conf.erb
3 files changed, 30 insertions(+), 26 deletions(-)

Approvals:
  Ottomata: Verified; Looks good to me, approved



diff --git a/manifests/defaults.pp b/manifests/defaults.pp
index 756c444..950500c 100644
--- a/manifests/defaults.pp
+++ b/manifests/defaults.pp
@@ -22,11 +22,12 @@
     $varnish_opts                   = {
         'm' => 'RxRequest:^(?!PURGE$)',
     }
-    $log_data_copy                  = true
+
     $tag_size_max                   = 2048
-    $log_line_scratch_size          = 4096
-    $log_hash_size                  = 5000
-    $log_hash_max                   = 5
+    $logline_line_scratch_size      = 4096
+    $logline_hash_size              = 5000
+    $logline_hash_max               = 5
+    $logline_data_copy              = true
 
     $log_level                      = 6
     $log_stderr                     = false
diff --git a/manifests/init.pp b/manifests/init.pp
index 1b9898f..50c93c2 100644
--- a/manifests/init.pp
+++ b/manifests/init.pp
@@ -30,24 +30,24 @@
 # $topic_request_required_acks      - Required ack level.  Default: 1
 # $topic_message_timeout_ms         - Local message timeout (milliseconds).
 #                                     Default: 60000
-# $compression_codec                - Compression codec to use when sending 
batched messages
+# $compression_codec                - Compression codec to use when sending 
batched messages to
 #                                     Kafka.  Valid values are 'none', 'gzip', 
and 'snappy'.
 #                                     Default: none
 # $varnish_opts                     - Arbitrary hash of varnish CLI options.
 #                                     Default: { 'm' => 
'RxRequest:^(?!PURGE$)' }
-# $log_data_copy                    - If true, log tag data read from VSL files
-#                                     should be copied instantly when read.  
Default true.
 # $tag_size_max                     - Maximum size of an individual field.  
Field will be truncated
 #                                     if it is larger than this.  Default: 2048
-# $log_line_scratch_size            - Size of static log line buffer.  If a 
line is larger than
+# $logline_line_scratch_size        - Size of static log line buffer.  If a 
line is larger than
 #                                     this buffer, temp buffers will be 
allocated.  Set this
 #                                     slightly larger than your expected line 
size.
 #                                     Default: 4096
-# $log_hash_size                    - Number of hash buckets.  Set this to 
avg_requests_per_second / 5.
+# $logline_hash_size                - Number of hash buckets.  Set this to 
avg_requests_per_second / 5.
 #                                     Default: 5000
-# $log_hash_max                     - Max number of log lines / bucket.  Set 
this to
+# $logline_hash_max                 - Max number of log lines / bucket.  Set 
this to
 #                                     avg_requests_per_second / $logline_hash_size.
 #                                     Default: 5
+# $logline_data_copy                - If true, log tag data read from VSL files
+#                                     should be copied instantly when read.  
Default true.
 # $log_level                        - varnishkafka log level.  Default 6 
(info).
 # $log_stderr                       - Boolean.  Whether to log to stderr.  
Default: true
 # $log_syslog                       - Boolean.  Whether to log to syslog.  
Default: true
@@ -75,11 +75,11 @@
     $compression_codec              = 
$varnishkafka::defaults::compression_codec,
 
     $varnish_opts                   = $varnishkafka::defaults::varnish_opts,
-    $log_data_copy                  = $varnishkafka::defaults::log_data_copy,
     $tag_size_max                   = $varnishkafka::defaults::tag_size_max,
-    $log_line_scratch_size          = 
$varnishkafka::defaults::log_line_scratch_size,
-    $log_hash_size                  = $varnishkafka::defaults::log_hash_size,
-    $log_hash_max                   = $varnishkafka::defaults::log_hash_max,
+    $logline_line_scratch_size      = 
$varnishkafka::defaults::logline_line_scratch_size,
+    $logline_hash_size              = 
$varnishkafka::defaults::logline_hash_size,
+    $logline_hash_max               = 
$varnishkafka::defaults::logline_hash_max,
+    $logline_data_copy              = 
$varnishkafka::defaults::logline_data_copy,
 
     $log_level                      = $varnishkafka::defaults::log_level,
     $log_stderr                     = $varnishkafka::defaults::log_stderr,
diff --git a/templates/varnishkafka.conf.erb b/templates/varnishkafka.conf.erb
index 4daaa34..047d06a 100644
--- a/templates/varnishkafka.conf.erb
+++ b/templates/varnishkafka.conf.erb
@@ -126,7 +126,7 @@
 # allocating tmpbufs, a tmpbuf only lives for the current request and is then
 # freed, so it is a little more costly than using the static scratch pad.
 # Defaults to 4096 bytes.
-log.line.scratch.size = <%= @log_line_scratch_size %>
+logline.scratch.size = <%= @logline_line_scratch_size %>
 
 
 # Logline cache hash tuning
@@ -137,13 +137,13 @@
 # Higher number yields more performance at the expense of memory.
 # Set this to avg_requests_per_second / 5.
 # Defaults to 5000
-log.hash.size = <%= @log_hash_size %>
+logline.hash.size = <%= @logline_hash_size %>
 
 # Maximum number of loglines per hash bucket
 # Higher number yields less memory consumption at the expense of performance.
 # Set this to avg_requests_per_second / logline.hash.size.
 # Defaults to 5
-log.hash.max = <%= @log_hash_max %>
+logline.hash.max = <%= @logline_hash_max %>
 
 
 # EXPERIMENTAL
@@ -155,7 +155,7 @@
 # NOTE:
 #   Must be set to true for offline files (-r file..) due to the way
 #   libvarnishapi reads its data.
-log.data.copy = true
+logline.data.copy = <%= @logline_data_copy %>
 
 
 #
@@ -215,6 +215,9 @@
 #                                                                     #
 # Kafka configuration                                                 #
 #                                                                     #
+# Kafka configuration properties are prefixed with "kafka."           #
+# and topic properties are prefixed with "kafka.topic.".              #
+#                                                                     #
 # For the full range of Kafka handle and topic configuration          #
 # properties, see:                                                    #
 #  https://github.com/edenhill/librdkafka/blob/master/rdkafka.h       #
@@ -225,14 +228,14 @@
 #######################################################################
 
 # Initial list of kafka brokers
-metadata.broker.list = <%= Array(@brokers).join(',') %>
+kafka.metadata.broker.list = <%= Array(@brokers).join(',') %>
 
 # Maximum number of messages allowed on the local producer queue
 # Defaults to 1000000
-queue.buffering.max.messages = <%= @queue_buffering_max_messages %>
+kafka.queue.buffering.max.messages = <%= @queue_buffering_max_messages %>
 
 # Maximum number of retries per messageset.
-message.send.max.retries = <%= @message_send_max_retries %>
+kafka.message.send.max.retries = <%= @message_send_max_retries %>
 
 
 #
@@ -240,18 +243,18 @@
 #
 
 # Topic to produce messages to
-topic = <%= @topic %>
+kafka.topic = <%= @topic %>
 
 # Partition (-1: random, else one of the available partitions)
-partition = <%= @partition %>
+kafka.partition = <%= @partition %>
 
 # Required number of acks
-topic.request.required.acks = <%= @topic_request_required_acks %>
+kafka.topic.request.required.acks = <%= @topic_request_required_acks %>
 
 # Local message timeout (milliseconds)
-topic.message.timeout.ms = <%= topic_message_timeout_ms %>
+kafka.topic.message.timeout.ms = <%= @topic_message_timeout_ms %>
 
 # Use compression when sending to Kafka.  Default is none.
 # Valid values are 'none', 'gzip', and 'snappy'.
-compression.codec = <%= @compression_codec %>
+kafka.compression.codec = <%= @compression_codec %>
 <% end -%>

-- 
To view, visit https://gerrit.wikimedia.org/r/94164
To unsubscribe, visit https://gerrit.wikimedia.org/r/settings

Gerrit-MessageType: merged
Gerrit-Change-Id: Iadea0c92c0f9ee9fee88d9411a794739817db904
Gerrit-PatchSet: 1
Gerrit-Project: operations/puppet/varnishkafka
Gerrit-Branch: master
Gerrit-Owner: Ottomata <[email protected]>
Gerrit-Reviewer: Ottomata <[email protected]>

_______________________________________________
MediaWiki-commits mailing list
[email protected]
https://lists.wikimedia.org/mailman/listinfo/mediawiki-commits

Reply via email to