This is an automated email from the ASF dual-hosted git repository.

ywkim pushed a commit to branch cnb
in repository https://gitbox.apache.org/repos/asf/bigtop.git


The following commit(s) were added to refs/heads/cnb by this push:
     new dc89387  BIGTOP-3249: Refine deployment of Kafka cluster
dc89387 is described below

commit dc8938799161f12e2a11159736f3f8fbd6d5d9a5
Author: Youngwoo Kim <[email protected]>
AuthorDate: Tue Oct 15 17:04:57 2019 +0900

    BIGTOP-3249: Refine deployment of Kafka cluster
---
 README.md                                          |  37 ++
 .../smoke-tests/kafka/TestKafkaSmoke.groovy        |  27 +-
 kafka/README.md                                    |  25 --
 kafka/kafka-client.yaml                            |  14 +
 kafka/kafka.yaml                                   |   7 -
 kafka/schema-registry/values.yaml                  | 175 ++++++++
 kafka/values.yaml                                  | 469 +++++++++++++++++++++
 7 files changed, 705 insertions(+), 49 deletions(-)

diff --git a/README.md b/README.md
index 53f07d8..b708199 100755
--- a/README.md
+++ b/README.md
@@ -129,6 +129,43 @@ $ kubectl exec -n $NS zookeeper-0 -- bin/zkServer.sh status
 ```
 Refer to https://github.com/helm/charts/tree/master/incubator/zookeeper for 
more configurations.
 
+## Kafka
+Deploy Kafka cluster via Helm:
+```
+$ helm repo add incubator 
http://storage.googleapis.com/kubernetes-charts-incubator
+$ helm install --name kafka \
+--namespace bigtop \
+-f kafka/values.yaml \
+--set 
zookeeper.url="zookeeper-0.zookeeper-headless:2181\,zookeeper-1.zookeeper-headless:2181\,zookeeper-2.zookeeper-headless:2181"
 \
+incubator/kafka
+
+# Deploy Kafka client:
+$ kubectl create --namespace bigtop -f kafka/kafka-client.yaml
+
+# Usage of Kafka client
+$ export 
ZOOKEEPER_URL="zookeeper-0.zookeeper-headless:2181,zookeeper-1.zookeeper-headless:2181,zookeeper-2.zookeeper-headless:2181"
+
+# List all topics
+$ kubectl -n bigtop exec kafka-client -- kafka-topics \
+--zookeeper  $ZOOKEEPER_URL \
+--list
+
+# To create a new topic:
+$ kubectl -n bigtop exec kafka-client -- kafka-topics \
+--zookeeper $ZOOKEEPER_URL \
+--topic test1 --create --partitions 1 --replication-factor 1
+
+```
+
+### Schema Registry 
+Optionally, you can create a schema registry service for Kafka:
+```
+helm install --name kafka-schema-registry --namespace bigtop -f 
kafka/schema-registry/values.yaml \
+--set 
kafkaStore.overrideBootstrapServers="kafka-0.kafka-headless:9092\,kafka-1.kafka-headless:9092\,kafka-2.kafka-headless:9092"
 \
+incubator/schema-registry
+
+```
+
 Getting Started
 ===============
 
diff --git a/bigtop-tests/smoke-tests/kafka/TestKafkaSmoke.groovy 
b/bigtop-tests/smoke-tests/kafka/TestKafkaSmoke.groovy
index 9dafd47..16ba255 100644
--- a/bigtop-tests/smoke-tests/kafka/TestKafkaSmoke.groovy
+++ b/bigtop-tests/smoke-tests/kafka/TestKafkaSmoke.groovy
@@ -31,35 +31,28 @@ import org.junit.runner.RunWith
 class TestKafkaSmoke {
   static Shell sh = new Shell("/bin/bash -s");
 
-  static final String KAFKA_HOME = "/usr/lib/kafka"
-  static final String KAFKA_CONFIG = KAFKA_HOME + "/config/server.properties "
-  static final String KAFKA_TOPICS = KAFKA_HOME + "/bin/kafka-topics.sh "
-  static final String KAFKA_SERVER_START = KAFKA_HOME + 
"/bin/kafka-server-start.sh "
-  static final String KAFKA_SERVER_STOP = KAFKA_HOME + 
"/bin/kafka-server-stop.sh "
+  static String ZOOKEEPER_URL = 
"zookeeper-0.zookeeper-headless:2181,zookeeper-1.zookeeper-headless:2181,zookeeper-2.zookeeper-headless:2181";
 
   @BeforeClass
   static void kafkaSetUp() {
-    /* Restart kafka server for Enabling 'delete.topic.enable' */
-    sh.exec(KAFKA_SERVER_STOP);
-    sh.exec(KAFKA_SERVER_START + KAFKA_CONFIG
-      + " --override delete.topic.enable=true &"
-    );
-    assertTrue("Restart Kafka server failed. ", sh.getRet() == 0);
   }
 
   @AfterClass
   public static void deleteKafkaTopics() {
-    sh.exec(KAFKA_TOPICS
-      + " --zookeeper localhost:2181"
-      + " --delete --topic test"
-    );
+    sh.exec("kubectl -n bigtop exec kafka-client -- kafka-topics --zookeeper "
+    + ZOOKEEPER_URL
+    + " --topic test1 --delete");
     assertTrue("Delete Kafka topics failed. ", sh.getRet() == 0);
   }
 
   @Test
   public void testCreateTopics() {
-    sh.exec(KAFKA_TOPICS + " --create --zookeeper localhost:2181 
--replication-factor 1 --partitions 1 --topic test");
-    sh.exec(KAFKA_TOPICS + " --list --zookeeper localhost:2181");
+    sh.exec("kubectl -n bigtop exec kafka-client -- kafka-topics --zookeeper "
+    + ZOOKEEPER_URL
+    + " --topic test1 --create --partitions 1 --replication-factor 1");
+    sh.exec("kubectl -n bigtop exec kafka-client -- kafka-topics --zookeeper "
+    + ZOOKEEPER_URL
+    + " --list");
     assertTrue(" Create Kafka topics failed. " + sh.getOut() + " " + 
sh.getErr(), sh.getRet() == 0);
   }
 }
diff --git a/kafka/README.md b/kafka/README.md
deleted file mode 100755
index 5499851..0000000
--- a/kafka/README.md
+++ /dev/null
@@ -1,25 +0,0 @@
-# This will use PVCs from a volume controller, similar to nifi.
-
-Check the existing NIFI deployment for how to do the volume controller.  
-
-`helm repo add incubator 
http://storage.googleapis.com/kubernetes-charts-incubator`
- 
-`helm install --name my-kafka incubator/kafka`
- 
-`helm delete --purge my-kafka`
-
-# Reuse Zookeeper
-
-
-To reuse zookeeper from the other examples, this helm chart needs to be 
modified
-to inject zookeeper.  For an example of how to do that, first create the 
configmap inthis
-directory.
-
-From there, modify the statefulset : `kubectl edit statefulset my-kafka`, 
adding this stanza
-to the end of the env declarations, which will cause it to reuse the ZK url 
specified inside 
-of the kafka implementation.
-```
-        envFrom:
-        - configMapRef:
-            name: kafka-cm
-```
diff --git a/kafka/kafka-client.yaml b/kafka/kafka-client.yaml
new file mode 100644
index 0000000..d64722b
--- /dev/null
+++ b/kafka/kafka-client.yaml
@@ -0,0 +1,14 @@
+apiVersion: v1
+kind: Pod
+metadata:
+  name: kafka-client
+  namespace: bigtop
+spec:
+  containers:
+  - name: kafka
+    image: confluentinc/cp-kafka:5.0.1
+    command:
+      - sh
+      - -c
+      - "exec tail -f /dev/null"
+
diff --git a/kafka/kafka.yaml b/kafka/kafka.yaml
deleted file mode 100755
index 88c9049..0000000
--- a/kafka/kafka.yaml
+++ /dev/null
@@ -1,7 +0,0 @@
-apiVersion: v1
-kind: ConfigMap
-metadata:
-  creationTimestamp: 2017-12-27T18:36:28Z
-  name: kafka-cm
-data:
- KAFKA_ZOOKEEPER_CONNECT: "nifi-zookeeper:2181"
diff --git a/kafka/schema-registry/values.yaml 
b/kafka/schema-registry/values.yaml
new file mode 100644
index 0000000..1d0e72c
--- /dev/null
+++ b/kafka/schema-registry/values.yaml
@@ -0,0 +1,175 @@
+# Default values for Confluent Schema-Registry
+# This is a YAML-formatted file.
+# Declare name/value pairs to be passed into your templates.
+# name: value
+
+
+## schema-registry repository
+image: "confluentinc/cp-schema-registry"
+## The container tag to use
+imageTag: 5.0.1
+## Specify a imagePullPolicy
+## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
+imagePullPolicy: "IfNotPresent"
+
+## Number of Schema Registry Pods to Deploy
+replicaCount: 1
+
+## Schema Registry Settings Overrides
+## Configuration Options can be found here: 
https://docs.confluent.io/current/schema-registry/docs/config.html
+configurationOverrides: {}
+  ## The default master.eligiblity is true
+  # master.eligibility: false
+
+## Custom pod annotations
+podAnnotations: {}
+
+## Configure resource requests and limits
+## ref: http://kubernetes.io/docs/user-guide/compute-resources/
+## Confluent has production deployment guidelines here:
+## ref: 
https://github.com/confluentinc/schema-registry/blob/master/docs/deployment.rst
+##
+resources: {}
+  # limits:
+  #  cpu: 100m
+  #  memory: 128Mi
+  # requests:
+  #  cpu: 100m
+  #  memory: 128Mi
+
+## The port on which the SchemaRegistry will be available and serving requests
+servicePort: 8081
+
+## Provides schema registry service settings
+service:
+  ## Any annotations to add to the service
+  annotations: {}
+  ## Any additional labels to add to the service
+  labels: {}
+
+## If `Kafka.Enabled` is `false`, kafkaStore.overrideBootstrapServers must be 
provided for Master Election.
+## You can list load balanced service endpoint, or list of all brokers (which 
is hard in K8s).  e.g.:
+## overrideBootstrapServers: "PLAINTEXT://dozing-prawn-kafka-headless:9092"
+## Charts uses Kafka Coordinator Master Election: 
https://docs.confluent.io/current/schema-registry/docs/design.html#kafka-coordinator-master-election
+kafkaStore:
+  overrideBootstrapServers: 
"kafka-0.kafka-headless:9092,kafka-1.kafka-headless:9092,kafka-2.kafka-headless:9092"
+  # By Default uses Release Name, but can be overridden.  Which means each 
release is its own group of
+  # Schema Registry workers.  You can have multiple groups talking to same 
Kafka Cluster
+  overrideGroupId: ""
+  ## Additional Java arguments to pass to Kafka.
+  # schemaRegistryOpts: -Dfoo=bar
+
+## Readiness probe config.
+## ref: 
https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/
+##
+readinessProbe:
+  initialDelaySeconds: 10
+  periodSeconds: 10
+  timeoutSeconds: 5
+  successThreshold: 1
+  failureThreshold: 3
+
+# Options for connecting to SASL kafka brokers
+sasl:
+  configPath: "/etc/kafka-config"
+  scram:
+    enabled: false
+    init:
+      image: "confluentinc/cp-schema-registry"
+      imageTag: "5.0.1"
+      imagePullPolicy: "IfNotPresent"
+    clientUser: "kafka-client"
+    zookeeperClientUser: "zookeeper-client"
+    # Passwords can be either provided here or pulled from an existing k8s 
secret.
+    # If user wants to specify the password here:
+    clientPassword: "client-password"
+    zookeeperClientPassword: "zookeeper-client-password"
+    # If user has an existing k8s secret they would like to use instead of 
generating them:
+    # useExistingSecret:
+    #   # Where to find the schema registry user secret
+    #   clientPassword:
+    #     secretKeyRef:
+    #       name: "schema-reg-secret"
+    #       key: "client-password"
+    #   # Where to find the zookeeper user secret
+    #   zookeeperClientPassword:
+    #     secretKeyRef:
+    #       name: "zookeeper-secret"
+    #       key: "zookeeper-client-password"
+
+## Kafka Settings
+kafka:
+  ## This is enabled only to allow installations of this chart without 
arguments
+  enabled: false
+  ## Override kafka settings for default installations
+  configurationOverrides:
+    # Needed to run with 1 Kafka Broker
+    offsets.topic.replication.factor: 1
+  ## Run only a single kafka broker by default
+  replicas: 1
+
+  ## Kafka Zookeeper chart settings
+  zookeeper:
+    # Install only a single Zookeeper pod in the StatefulSet
+    replicaCount: 1
+
+## Provides schema registry ingress settings
+ingress:
+  ## If true provide ingress to the schema registry
+  enabled: false
+  ## Annotations for the ingress, if any
+  annotations: {}
+  ## Hostname of the ingress
+  hostname: ""
+  ## Any additional labels to add to the ingress
+  labels: {}
+  tls:
+    enabled: false
+    secretName: schema-registry-tls
+
+## External Nodeport/LoadBalancer for Cloud Providers
+external:
+  enabled: false
+  type: LoadBalancer
+  servicePort: 443
+  loadBalancerIP: ""
+  nodePort: ""
+
+## Provide JMX Port
+jmx:
+  enabled: true
+  port: 5555
+
+## Prometheus Exporter Configuration
+## ref: https://prometheus.io/docs/instrumenting/exporters/
+prometheus:
+  ## JMX Exporter Configuration
+  ## ref: https://github.com/prometheus/jmx_exporter
+  jmx:
+    enabled: false
+    image: solsson/kafka-prometheus-jmx-exporter@sha256
+    imageTag: 6f82e2b0464f50da8104acd7363fb9b995001ddff77d248379f8788e78946143
+    port: 5556
+    resources: {}
+      # limits:
+      #  cpu: 100m
+      #  memory: 128Mi
+      # requests:
+      #  cpu: 100m
+      #  memory: 128Mi
+
+## Pass any secrets to the pods. The secrets will be mounted to a specific path
+## OR presented as Environment Variables. Environment variable names are
+## generated as: `<secretName>_<secretKey>` (All upper case)
+## note: Keystore/Truststore are binary and should always be presented as 
files.
+secrets: []
+# - name: schema-registry-jks
+#   keys:
+#     - ksr-server.truststore.jks
+#     - ksr-server.keystore.jks
+#   mountPath: /secrets
+# - name: schema-registry-jks-pw
+#   keys:
+#     - ssl_truststore_password
+#     - ssl_keystore_password
+#     - ssl_key_password
diff --git a/kafka/values.yaml b/kafka/values.yaml
new file mode 100644
index 0000000..bd4b789
--- /dev/null
+++ b/kafka/values.yaml
@@ -0,0 +1,469 @@
+# 
------------------------------------------------------------------------------
+# Kafka:
+# 
------------------------------------------------------------------------------
+
+## The StatefulSet installs 3 pods by default
+replicas: 3
+
+## The kafka image repository
+image: "confluentinc/cp-kafka"
+
+## The kafka image tag
+imageTag: "5.0.1"  # Confluent image for Kafka 2.0.0
+
+## Specify a imagePullPolicy
+## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
+imagePullPolicy: "IfNotPresent"
+
+## Configure resource requests and limits
+## ref: http://kubernetes.io/docs/user-guide/compute-resources/
+resources: {}
+  # limits:
+  #   cpu: 200m
+  #   memory: 1536Mi
+  # requests:
+  #   cpu: 100m
+  #   memory: 1024Mi
+kafkaHeapOptions: "-Xmx1G -Xms1G"
+
+## Optional Container Security context
+securityContext: {}
+
+## The StatefulSet Update Strategy which Kafka will use when changes are 
applied.
+## ref: 
https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies
+updateStrategy:
+  type: "OnDelete"
+
+## Start and stop pods in Parallel or OrderedReady (one-by-one.)  Note - Can 
not change after first release.
+## ref: 
https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#pod-management-policy
+podManagementPolicy: OrderedReady
+
+## Useful if using any custom authorizer
+## Pass in some secrets to use (if required)
+# secrets:
+# - name: myKafkaSecret
+#   keys:
+#     - username
+#     - password
+#   # mountPath: /opt/kafka/secret
+# - name: myZkSecret
+#   keys:
+#     - user
+#     - pass
+#   mountPath: /opt/zookeeper/secret
+
+
+## The subpath within the Kafka container's PV where logs will be stored.
+## This is combined with `persistence.mountPath`, to create, by default: 
/opt/kafka/data/logs
+logSubPath: "logs"
+
+## Use an alternate scheduler, e.g. "stork".
+## ref: 
https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
+##
+# schedulerName:
+
+## Use an alternate serviceAccount
+## Useful when using images in custom repositories
+# serviceAccountName:
+
+## Set a pod priorityClassName
+# priorityClassName: high-priority
+
+## Pod scheduling preferences (by default keep pods within a release on 
separate nodes).
+## ref: 
https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
+## By default we don't set affinity
+affinity: {}
+## Alternatively, this typical example defines:
+## antiAffinity (to keep Kafka pods on separate pods)
+## and affinity (to encourage Kafka pods to be collocated with Zookeeper pods)
+# affinity:
+#   podAntiAffinity:
+#     requiredDuringSchedulingIgnoredDuringExecution:
+#     - labelSelector:
+#         matchExpressions:
+#         - key: app
+#           operator: In
+#           values:
+#           - kafka
+#       topologyKey: "kubernetes.io/hostname"
+#   podAffinity:
+#     preferredDuringSchedulingIgnoredDuringExecution:
+#      - weight: 50
+#        podAffinityTerm:
+#          labelSelector:
+#            matchExpressions:
+#            - key: app
+#              operator: In
+#              values:
+#                - zookeeper
+#          topologyKey: "kubernetes.io/hostname"
+
+## Node labels for pod assignment
+## ref: 
https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector
+nodeSelector: {}
+
+## Readiness probe config.
+## ref: 
https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/
+##
+readinessProbe:
+  initialDelaySeconds: 30
+  periodSeconds: 10
+  timeoutSeconds: 5
+  successThreshold: 1
+  failureThreshold: 3
+
+## Period to wait for broker graceful shutdown (sigterm) before pod is killed 
(sigkill)
+## ref: 
https://kubernetes-v1-4.github.io/docs/user-guide/production-pods/#lifecycle-hooks-and-termination-notice
+## ref: https://kafka.apache.org/10/documentation.html#brokerconfigs 
controlled.shutdown.*
+terminationGracePeriodSeconds: 60
+
+# Tolerations for nodes that have taints on them.
+# Useful if you want to dedicate nodes to just run kafka
+# https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
+tolerations: []
+# tolerations:
+# - key: "key"
+#   operator: "Equal"
+#   value: "value"
+#   effect: "NoSchedule"
+
+## Headless service.
+##
+headless:
+  # annotations:
+  # targetPort:
+  port: 9092
+
+## External access.
+##
+external:
+  enabled: false
+  # type can be either NodePort or LoadBalancer
+  type: NodePort
+  # annotations:
+  #  service.beta.kubernetes.io/openstack-internal-load-balancer: "true"
+  dns:
+    useInternal: false
+    useExternal: true
+  # If using external service type LoadBalancer and external dns, set distinct 
to true below.
+  # This creates an A record for each statefulset pod/broker. You should then 
map the
+  # A record of the broker to the EXTERNAL IP given by the LoadBalancer in 
your DNS server.
+  distinct: false
+  servicePort: 19092
+  firstListenerPort: 31090
+  domain: cluster.local
+  loadBalancerIP: []
+  init:
+    image: "lwolf/kubectl_deployer"
+    imageTag: "0.4"
+    imagePullPolicy: "IfNotPresent"
+
+# Annotation to be added to Kafka pods
+podAnnotations: {}
+
+# Labels to be added to Kafka pods
+podLabels: {}
+  # service: broker
+  # team: developers
+
+podDisruptionBudget: {}
+  # maxUnavailable: 1  # Limits how many Kafka pods may be unavailable due to 
voluntary disruptions.
+
+## Configuration Overrides. Specify any Kafka settings you would like set on 
the StatefulSet
+## here in map format, as defined in the official docs.
+## ref: https://kafka.apache.org/documentation/#brokerconfigs
+##
+configurationOverrides:
+  "confluent.support.metrics.enable": false  # Disables confluent metric 
submission
+  # "auto.leader.rebalance.enable": true
+  # "auto.create.topics.enable": true
+  # "controlled.shutdown.enable": true
+  # "controlled.shutdown.max.retries": 100
+
+  ## Options required for external access via NodePort
+  ## ref:
+  ## - http://kafka.apache.org/documentation/#security_configbroker
+  ## - 
https://cwiki.apache.org/confluence/display/KAFKA/KIP-103%3A+Separation+of+Internal+and+External+traffic
+  ##
+  ## Setting "advertised.listeners" here appends to 
"PLAINTEXT://${POD_IP}:9092,", ensure you update the domain
+  ## If external service type is Nodeport:
+  # "advertised.listeners": |-
+  #   EXTERNAL://kafka.cluster.local:$((31090 + ${KAFKA_BROKER_ID}))
+  ## If external service type is LoadBalancer and distinct is true:
+  # "advertised.listeners": |-
+  #   EXTERNAL://kafka-$((${KAFKA_BROKER_ID})).cluster.local:19092
+  ## If external service type is LoadBalancer and distinct is false:
+  # "advertised.listeners": |-
+  #   EXTERNAL://${LOAD_BALANCER_IP}:31090
+  ## Uncomment to define the EXTERNAL Listener protocol
+  # "listener.security.protocol.map": |-
+  #   PLAINTEXT:PLAINTEXT,EXTERNAL:PLAINTEXT
+
+## set extra ENVs
+#   key: "value"
+envOverrides: {}
+
+
+## A collection of additional ports to expose on brokers (formatted as normal 
containerPort yaml)
+# Useful when the image exposes metrics (like prometheus, etc.) through a 
javaagent instead of a sidecar
+additionalPorts: {}
+
+## Persistence configuration. Specify if and how to persist data to a 
persistent volume.
+##
+persistence:
+  enabled: true
+
+  ## The size of the PersistentVolume to allocate to each Kafka Pod in the 
StatefulSet. For
+  ## production servers this number should likely be much larger.
+  ##
+  size: "1Gi"
+
+  ## The location within the Kafka container where the PV will mount its 
storage and Kafka will
+  ## store its logs.
+  ##
+  mountPath: "/opt/kafka/data"
+
+  ## Kafka data Persistent Volume Storage Class
+  ## If defined, storageClassName: <storageClass>
+  ## If set to "-", storageClassName: "", which disables dynamic provisioning
+  ## If undefined (the default) or set to null, no storageClassName spec is
+  ##   set, choosing the default provisioner.  (gp2 on AWS, standard on
+  ##   GKE, AWS & OpenStack)
+  ##
+  # storageClass:
+  storageClass: "rook-ceph-block"
+
+jmx:
+  ## Rules to apply to the Prometheus JMX Exporter.  Note while lots of stats 
have been cleaned and exposed,
+  ## there are still more stats to clean up and expose, others will never get 
exposed.  They keep lots of duplicates
+  ## that can be derived easily.  The configMap in this chart cleans up the 
metrics it exposes to be in a Prometheus
+  ## format, eg topic, broker are labels and not part of metric name. 
Improvements are gladly accepted and encouraged.
+  configMap:
+
+    ## Allows disabling the default configmap, note a configMap is needed
+    enabled: true
+
+    ## Allows setting values to generate configmap
+    ## To allow all metrics through (warning its crazy excessive) comment out 
below `overrideConfig` and set
+    ## `whitelistObjectNames: []`
+    overrideConfig: {}
+      # jmxUrl: service:jmx:rmi:///jndi/rmi://127.0.0.1:5555/jmxrmi
+      # lowercaseOutputName: true
+      # lowercaseOutputLabelNames: true
+      # ssl: false
+      # rules:
+      # - pattern: ".*"
+
+    ## If you would like to supply your own ConfigMap for JMX metrics, supply 
the name of that
+    ## ConfigMap as an `overrideName` here.
+    overrideName: ""
+
+  ## Port the jmx metrics are exposed in native jmx format, not in Prometheus 
format
+  port: 5555
+
+  ## JMX Whitelist Objects, can be set to control which JMX metrics are 
exposed.  Only whitelisted
+  ## values will be exposed via JMX Exporter.  They must also be exposed via 
Rules.  To expose all metrics
+  ## (warning its crazy excessive and they aren't formatted in a prometheus 
style) (1) `whitelistObjectNames: []`
+  ## (2) commented out above `overrideConfig`.
+  whitelistObjectNames:  # []
+  - kafka.controller:*
+  - kafka.server:*
+  - java.lang:*
+  - kafka.network:*
+  - kafka.log:*
+
+## Prometheus Exporters / Metrics
+##
+prometheus:
+  ## Prometheus JMX Exporter: exposes the majority of Kafkas metrics
+  jmx:
+    enabled: false
+
+    ## The image to use for the metrics collector
+    image: solsson/kafka-prometheus-jmx-exporter@sha256
+
+    ## The image tag to use for the metrics collector
+    imageTag: a23062396cd5af1acdf76512632c20ea6be76885dfc20cd9ff40fb23846557e8
+
+    ## Interval at which Prometheus scrapes metrics, note: only used by 
Prometheus Operator
+    interval: 10s
+
+    ## Timeout at which Prometheus timeouts scrape run, note: only used by 
Prometheus Operator
+    scrapeTimeout: 10s
+
+    ## Port jmx-exporter exposes Prometheus format metrics to scrape
+    port: 5556
+
+    resources: {}
+      # limits:
+      #   cpu: 200m
+      #   memory: 1Gi
+      # requests:
+      #   cpu: 100m
+      #   memory: 100Mi
+
+  ## Prometheus Kafka Exporter: exposes complimentary metrics to JMX Exporter
+  kafka:
+    enabled: false
+
+    ## The image to use for the metrics collector
+    image: danielqsj/kafka-exporter
+
+    ## The image tag to use for the metrics collector
+    imageTag: v1.2.0
+
+    ## Interval at which Prometheus scrapes metrics, note: only used by 
Prometheus Operator
+    interval: 10s
+
+    ## Timeout at which Prometheus timeouts scrape run, note: only used by 
Prometheus Operator
+    scrapeTimeout: 10s
+
+    ## Port kafka-exporter exposes for Prometheus to scrape metrics
+    port: 9308
+
+    ## Resource limits
+    resources: {}
+#      limits:
+#        cpu: 200m
+#        memory: 1Gi
+#      requests:
+#        cpu: 100m
+#        memory: 100Mi
+
+    # Tolerations for nodes that have taints on them.
+    # Useful if you want to dedicate nodes to just run kafka-exporter
+    # https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
+    tolerations: []
+    # tolerations:
+    # - key: "key"
+    #   operator: "Equal"
+    #   value: "value"
+    #   effect: "NoSchedule"
+
+    ## Pod scheduling preferences (by default keep pods within a release on 
separate nodes).
+    ## ref: 
https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
+    ## By default we don't set affinity
+    affinity: {}
+    ## Alternatively, this typical example defines:
+    ## affinity (to encourage Kafka Exporter pods to be collocated with Kafka 
pods)
+    # affinity:
+    #   podAffinity:
+    #     preferredDuringSchedulingIgnoredDuringExecution:
+    #      - weight: 50
+    #        podAffinityTerm:
+    #          labelSelector:
+    #            matchExpressions:
+    #            - key: app
+    #              operator: In
+    #              values:
+    #                - kafka
+    #          topologyKey: "kubernetes.io/hostname"
+
+    ## Node labels for pod assignment
+    ## ref: 
https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector
+    nodeSelector: {}
+
+  operator:
+    ## Are you using Prometheus Operator?
+    enabled: false
+
+    serviceMonitor:
+      # Namespace Prometheus is installed in
+      namespace: monitoring
+
+      ## Defaults to whats used if you follow CoreOS [Prometheus Install 
Instructions](https://github.com/coreos/prometheus-operator/tree/master/helm#tldr)
+      ## [Prometheus Selector 
Label](https://github.com/coreos/prometheus-operator/blob/master/helm/prometheus/templates/prometheus.yaml#L65)
+      ## [Kube Prometheus Selector 
Label](https://github.com/coreos/prometheus-operator/blob/master/helm/kube-prometheus/values.yaml#L298)
+      selector:
+        prometheus: kube-prometheus
+
+## Kafka Config job configuration
+##
+configJob:
+  ## Specify the number of retries before considering kafka-config job as 
failed.
+  ## 
https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/#pod-backoff-failure-policy
+  backoffLimit: 6
+
+## Topic creation and configuration.
+## The job will be run on a deployment only when the config has been changed.
+## - If 'partitions' and 'replicationFactor' are specified we create the topic 
(with --if-not-exists.)
+## - If 'partitions', 'replicationFactor' and 'reassignPartitions' are 
specified we reassign the partitions to
+## increase the replication factor of an existing topic.
+## - If 'partitions' is specified we 'alter' the number of partitions. This 
will
+## silently and safely fail if the new setting isn’t strictly larger than the 
old (i.e. a NOOP.) Do be aware of the
+## implications for keyed topics (ref: 
https://docs.confluent.io/current/kafka/post-deployment.html#admin-operations)
+## - If 'defaultConfig' is specified it's deleted from the topic 
configuration. If it isn't present,
+## it will silently and safely fail.
+## - If 'config' is specified it's added to the topic configuration.
+##
+## Note: To increase the 'replicationFactor' of a topic, 'reassignPartitions' 
must be set to true (see above).
+##
+topics: []
+  # - name: myExistingTopicConfig
+  #   config: "cleanup.policy=compact,delete.retention.ms=604800000"
+  # - name: myExistingTopicReassignPartitions
+  #   partitions: 8
+  #   replicationFactor: 5
+  #   reassignPartitions: true
+  # - name: myExistingTopicPartitions
+  #   partitions: 8
+  # - name: myNewTopicWithConfig
+  #   partitions: 8
+  #   replicationFactor: 3
+  #   defaultConfig: "segment.bytes,segment.ms"
+  #   config: "cleanup.policy=compact,delete.retention.ms=604800000"
+  # - name: myAclTopicPartitions
+  #   partitions: 8
+  #   acls:
+  #     - user: read
+  #       operations: [ Read ]
+  #     - user: read_and_write
+  #       operations:
+  #         - Read
+  #         - Write
+  #     - user: all
+  #       operations: [ All ]
+
+# 
------------------------------------------------------------------------------
+# Zookeeper:
+# 
------------------------------------------------------------------------------
+
+zookeeper:
+  zookeeper.url: 
"zookeeper-0.zookeeper-headless:2181,zookeeper-1.zookeeper-headless:2181,zookeeper-2.zookeeper-headless:2181/kafka"
+  ## If true, install the Zookeeper chart alongside Kafka
+  ## ref: https://github.com/kubernetes/charts/tree/master/incubator/zookeeper
+  enabled: false 
+
+  ## Configure Zookeeper resource requests and limits
+  ## ref: http://kubernetes.io/docs/user-guide/compute-resources/
+  resources: ~
+
+  ## Environmental variables to set in Zookeeper
+  env:
+    ## The JVM heap size to allocate to Zookeeper
+    ZK_HEAP_SIZE: "1G"
+
+  persistence:
+    enabled: false
+    ## The amount of PV storage allocated to each Zookeeper pod in the 
statefulset
+    # size: "2Gi"
+
+  ## Specify a Zookeeper imagePullPolicy
+  ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
+  image:
+    PullPolicy: "IfNotPresent"
+
+  ## If the Zookeeper Chart is disabled a URL and port are required to connect
+  url: ""
+  port: 2181
+
+  ## Pod scheduling preferences (by default keep pods within a release on 
separate nodes).
+  ## ref: 
https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
+  ## By default we don't set affinity:
+  affinity: {}  # Criteria by which pod label-values influence scheduling for 
zookeeper pods.
+  # podAntiAffinity:
+  #   requiredDuringSchedulingIgnoredDuringExecution:
+  #     - topologyKey: "kubernetes.io/hostname"
+  #       labelSelector:
+  #         matchLabels:
+  #           release: zookeeper

Reply via email to