Bruno Cadonna created KAFKA-8441:
------------------------------------

             Summary: CLONE - Flaky Test KStreamAggregationIntegrationTest#shouldReduceSessionWindows
                 Key: KAFKA-8441
                 URL: https://issues.apache.org/jira/browse/KAFKA-8441
             Project: Kafka
          Issue Type: Bug
          Components: streams, unit tests
    Affects Versions: 2.3.0
            Reporter: Bruno Cadonna


h1. Stacktrace:
{noformat}
java.lang.AssertionError: 
Expected: <KeyValue(start, 1558759753345)>
     but: was null
        at org.hamcrest.MatcherAssert.assertThat(MatcherAssert.java:18)
        at org.hamcrest.MatcherAssert.assertThat(MatcherAssert.java:6)
        at org.apache.kafka.streams.integration.KStreamAggregationIntegrationTest.shouldReduceSessionWindows(KStreamAggregationIntegrationTest.java:663)
        at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
        at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
        at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
        at java.lang.reflect.Method.invoke(Method.java:498)
        at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59)
        at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
        at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56)
        at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17)
        at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26)
        at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27)
        at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:305)
        at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100)
        at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:365)
        at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103)
        at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63)
        at org.junit.runners.ParentRunner$4.run(ParentRunner.java:330)
        at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:78)
        at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:328)
        at org.junit.runners.ParentRunner.access$100(ParentRunner.java:65)
        at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:292)
        at org.junit.rules.ExternalResource$1.evaluate(ExternalResource.java:54)
        at org.junit.rules.RunRules.evaluate(RunRules.java:20)
        at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:305)
        at org.junit.runners.ParentRunner.run(ParentRunner.java:412)
        at org.gradle.api.internal.tasks.testing.junit.JUnitTestClassExecutor.runTestClass(JUnitTestClassExecutor.java:110)
        at org.gradle.api.internal.tasks.testing.junit.JUnitTestClassExecutor.execute(JUnitTestClassExecutor.java:58)
        at org.gradle.api.internal.tasks.testing.junit.JUnitTestClassExecutor.execute(JUnitTestClassExecutor.java:38)
        at org.gradle.api.internal.tasks.testing.junit.AbstractJUnitTestClassProcessor.processTestClass(AbstractJUnitTestClassProcessor.java:62)
        at org.gradle.api.internal.tasks.testing.SuiteTestClassProcessor.processTestClass(SuiteTestClassProcessor.java:51)
        at sun.reflect.GeneratedMethodAccessor17.invoke(Unknown Source)
        at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
        at java.lang.reflect.Method.invoke(Method.java:498)
        at org.gradle.internal.dispatch.ReflectionDispatch.dispatch(ReflectionDispatch.java:35)
        at org.gradle.internal.dispatch.ReflectionDispatch.dispatch(ReflectionDispatch.java:24)
        at org.gradle.internal.dispatch.ContextClassLoaderDispatch.dispatch(ContextClassLoaderDispatch.java:32)
        at org.gradle.internal.dispatch.ProxyDispatchAdapter$DispatchingInvocationHandler.invoke(ProxyDispatchAdapter.java:93)
        at com.sun.proxy.$Proxy2.processTestClass(Unknown Source)
        at org.gradle.api.internal.tasks.testing.worker.TestWorker.processTestClass(TestWorker.java:118)
        at sun.reflect.GeneratedMethodAccessor16.invoke(Unknown Source)
        at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
        at java.lang.reflect.Method.invoke(Method.java:498)
        at org.gradle.internal.dispatch.ReflectionDispatch.dispatch(ReflectionDispatch.java:35)
        at org.gradle.internal.dispatch.ReflectionDispatch.dispatch(ReflectionDispatch.java:24)
        at org.gradle.internal.remote.internal.hub.MessageHubBackedObjectConnection$DispatchWrapper.dispatch(MessageHubBackedObjectConnection.java:175)
        at org.gradle.internal.remote.internal.hub.MessageHubBackedObjectConnection$DispatchWrapper.dispatch(MessageHubBackedObjectConnection.java:157)
        at org.gradle.internal.remote.internal.hub.MessageHub$Handler.run(MessageHub.java:404)
        at org.gradle.internal.concurrent.ExecutorPolicy$CatchAndRecordFailures.onExecute(ExecutorPolicy.java:63)
        at org.gradle.internal.concurrent.ManagedExecutorImpl$1.run(ManagedExecutorImpl.java:46)
        at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
        at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
        at org.gradle.internal.concurrent.ThreadFactoryImpl$ManagedThreadRunnable.run(ThreadFactoryImpl.java:55)
        at java.lang.Thread.run(Thread.java:748)

{noformat}
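h1. Notes:
The assertion at KStreamAggregationIntegrationTest.java:663 appears to look up the reduce result for a session window keyed by "start" and expects the value 1558759753345, which matches the broker's "Kafka startTimeMs: 1558759753345" in the standard output below; this suggests the test derives its expected values from the cluster's clock. The "was null" then means no result for that window had been received when the assertion ran, i.e. a timing-dependent failure rather than a wrong value. The sketch below is a hypothetical reconstruction of that assertion pattern; the class name, the results map, and the window bounds are illustrative assumptions, not the actual test source.
{code:java}
import static org.hamcrest.CoreMatchers.equalTo;
import static org.hamcrest.MatcherAssert.assertThat;

import java.util.HashMap;
import java.util.Map;

import org.apache.kafka.streams.KeyValue;
import org.apache.kafka.streams.kstream.Windowed;
import org.apache.kafka.streams.kstream.internals.SessionWindow;

// Hypothetical reconstruction of the failing assertion pattern; names and
// window bounds are assumptions for illustration, not the actual test source.
public class SessionWindowAssertionSketch {

    public static void main(final String[] args) {
        // Results as they would be collected from the session-windowed reduce.
        // In the flaky run the expected entry is missing, so get(...) returns
        // null and hamcrest reports:
        //   Expected: <KeyValue(start, 1558759753345)>
        //        but: was null
        final Map<Windowed<String>, KeyValue<String, String>> results = new HashMap<>();

        final long t1 = 1558759753345L; // matches "Kafka startTimeMs" in the log below
        final Windowed<String> sessionOfStart =
                new Windowed<>("start", new SessionWindow(t1, t1));

        assertThat(results.get(sessionOfStart),
                equalTo(KeyValue.pair("start", String.valueOf(t1))));
    }
}
{code}
If the real test follows this pattern, the usual hardening against this kind of flakiness is to wait until the expected number of output records has been consumed (e.g. with TestUtils.waitForCondition) before asserting, rather than asserting at a fixed point in time.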
h1. Standard Output:
{noformat}
[2019-05-25 04:49:13,346] INFO Created server with tickTime 800 
minSessionTimeout 1600 maxSessionTimeout 16000 datadir 
/tmp/kafka-3440510347102689471/version-2 snapdir 
/tmp/kafka-7541388714805220321/version-2 
(org.apache.zookeeper.server.ZooKeeperServer:174)
[2019-05-25 04:49:13,346] INFO binding to port /127.0.0.1:0 
(org.apache.zookeeper.server.NIOServerCnxnFactory:89)
[2019-05-25 04:49:13,355] INFO SessionTrackerImpl exited loop! 
(org.apache.zookeeper.server.SessionTrackerImpl:163)
[2019-05-25 04:49:13,384] INFO KafkaConfig values: 
        advertised.host.name = null
        advertised.listeners = null
        advertised.port = null
        alter.config.policy.class.name = null
        alter.log.dirs.replication.quota.window.num = 11
        alter.log.dirs.replication.quota.window.size.seconds = 1
        authorizer.class.name = 
        auto.create.topics.enable = true
        auto.leader.rebalance.enable = true
        background.threads = 10
        broker.id = 0
        broker.id.generation.enable = true
        broker.rack = null
        client.quota.callback.class = null
        compression.type = producer
        connection.failed.authentication.delay.ms = 100
        connections.max.idle.ms = 600000
        connections.max.reauth.ms = 0
        control.plane.listener.name = null
        controlled.shutdown.enable = true
        controlled.shutdown.max.retries = 3
        controlled.shutdown.retry.backoff.ms = 5000
        controller.socket.timeout.ms = 30000
        create.topic.policy.class.name = null
        default.replication.factor = 1
        delegation.token.expiry.check.interval.ms = 3600000
        delegation.token.expiry.time.ms = 86400000
        delegation.token.master.key = null
        delegation.token.max.lifetime.ms = 604800000
        delete.records.purgatory.purge.interval.requests = 1
        delete.topic.enable = true
        fetch.purgatory.purge.interval.requests = 1000
        group.initial.rebalance.delay.ms = 0
        group.max.session.timeout.ms = 1800000
        group.max.size = 2147483647
        group.min.session.timeout.ms = 0
        host.name = localhost
        inter.broker.listener.name = null
        inter.broker.protocol.version = 2.3-IV1
        kafka.metrics.polling.interval.secs = 10
        kafka.metrics.reporters = []
        leader.imbalance.check.interval.seconds = 300
        leader.imbalance.per.broker.percentage = 10
        listener.security.protocol.map = PLAINTEXT:PLAINTEXT,SSL:SSL,SASL_PLAINTEXT:SASL_PLAINTEXT,SASL_SSL:SASL_SSL
        listeners = null
        log.cleaner.backoff.ms = 15000
        log.cleaner.dedupe.buffer.size = 2097152
        log.cleaner.delete.retention.ms = 86400000
        log.cleaner.enable = true
        log.cleaner.io.buffer.load.factor = 0.9
        log.cleaner.io.buffer.size = 524288
        log.cleaner.io.max.bytes.per.second = 1.7976931348623157E308
        log.cleaner.max.compaction.lag.ms = 9223372036854775807
        log.cleaner.min.cleanable.ratio = 0.5
        log.cleaner.min.compaction.lag.ms = 0
        log.cleaner.threads = 1
        log.cleanup.policy = [delete]
        log.dir = /tmp/junit7423677051290926261/junit6332204664748699761
        log.dirs = null
        log.flush.interval.messages = 9223372036854775807
        log.flush.interval.ms = null
        log.flush.offset.checkpoint.interval.ms = 60000
        log.flush.scheduler.interval.ms = 9223372036854775807
        log.flush.start.offset.checkpoint.interval.ms = 60000
        log.index.interval.bytes = 4096
        log.index.size.max.bytes = 10485760
        log.message.downconversion.enable = true
        log.message.format.version = 2.3-IV1
        log.message.timestamp.difference.max.ms = 9223372036854775807
        log.message.timestamp.type = CreateTime
        log.preallocate = false
        log.retention.bytes = -1
        log.retention.check.interval.ms = 300000
        log.retention.hours = 168
        log.retention.minutes = null
        log.retention.ms = null
        log.roll.hours = 168
        log.roll.jitter.hours = 0
        log.roll.jitter.ms = null
        log.roll.ms = null
        log.segment.bytes = 1073741824
        log.segment.delete.delay.ms = 60000
        max.connections = 2147483647
        max.connections.per.ip = 2147483647
        max.connections.per.ip.overrides = 
        max.incremental.fetch.session.cache.slots = 1000
        message.max.bytes = 1000000
        metric.reporters = []
        metrics.num.samples = 2
        metrics.recording.level = INFO
        metrics.sample.window.ms = 30000
        min.insync.replicas = 1
        num.io.threads = 8
        num.network.threads = 3
        num.partitions = 1
        num.recovery.threads.per.data.dir = 1
        num.replica.alter.log.dirs.threads = null
        num.replica.fetchers = 1
        offset.metadata.max.bytes = 4096
        offsets.commit.required.acks = -1
        offsets.commit.timeout.ms = 5000
        offsets.load.buffer.size = 5242880
        offsets.retention.check.interval.ms = 600000
        offsets.retention.minutes = 10080
        offsets.topic.compression.codec = 0
        offsets.topic.num.partitions = 50
        offsets.topic.replication.factor = 1
        offsets.topic.segment.bytes = 104857600
        password.encoder.cipher.algorithm = AES/CBC/PKCS5Padding
        password.encoder.iterations = 4096
        password.encoder.key.length = 128
        password.encoder.keyfactory.algorithm = null
        password.encoder.old.secret = null
        password.encoder.secret = null
        port = 0
        principal.builder.class = null
        producer.purgatory.purge.interval.requests = 1000
        queued.max.request.bytes = -1
        queued.max.requests = 500
        quota.consumer.default = 9223372036854775807
        quota.producer.default = 9223372036854775807
        quota.window.num = 11
        quota.window.size.seconds = 1
        replica.fetch.backoff.ms = 1000
        replica.fetch.max.bytes = 1048576
        replica.fetch.min.bytes = 1
        replica.fetch.response.max.bytes = 10485760
        replica.fetch.wait.max.ms = 500
        replica.high.watermark.checkpoint.interval.ms = 5000
        replica.lag.time.max.ms = 10000
        replica.socket.receive.buffer.bytes = 65536
        replica.socket.timeout.ms = 30000
        replication.quota.window.num = 11
        replication.quota.window.size.seconds = 1
        request.timeout.ms = 30000
        reserved.broker.max.id = 1000
        sasl.client.callback.handler.class = null
        sasl.enabled.mechanisms = [GSSAPI]
        sasl.jaas.config = null
        sasl.kerberos.kinit.cmd = /usr/bin/kinit
        sasl.kerberos.min.time.before.relogin = 60000
        sasl.kerberos.principal.to.local.rules = [DEFAULT]
        sasl.kerberos.service.name = null
        sasl.kerberos.ticket.renew.jitter = 0.05
        sasl.kerberos.ticket.renew.window.factor = 0.8
        sasl.login.callback.handler.class = null
        sasl.login.class = null
        sasl.login.refresh.buffer.seconds = 300
        sasl.login.refresh.min.period.seconds = 60
        sasl.login.refresh.window.factor = 0.8
        sasl.login.refresh.window.jitter = 0.05
        sasl.mechanism.inter.broker.protocol = GSSAPI
        sasl.server.callback.handler.class = null
        security.inter.broker.protocol = PLAINTEXT
        socket.receive.buffer.bytes = 102400
        socket.request.max.bytes = 104857600
        socket.send.buffer.bytes = 102400
        ssl.cipher.suites = []
        ssl.client.auth = none
        ssl.enabled.protocols = [TLSv1.2, TLSv1.1, TLSv1]
        ssl.endpoint.identification.algorithm = https
        ssl.key.password = null
        ssl.keymanager.algorithm = SunX509
        ssl.keystore.location = null
        ssl.keystore.password = null
        ssl.keystore.type = JKS
        ssl.principal.mapping.rules = [DEFAULT]
        ssl.protocol = TLS
        ssl.provider = null
        ssl.secure.random.implementation = null
        ssl.trustmanager.algorithm = PKIX
        ssl.truststore.location = null
        ssl.truststore.password = null
        ssl.truststore.type = JKS
        transaction.abort.timed.out.transaction.cleanup.interval.ms = 60000
        transaction.max.timeout.ms = 900000
        transaction.remove.expired.transaction.cleanup.interval.ms = 3600000
        transaction.state.log.load.buffer.size = 5242880
        transaction.state.log.min.isr = 2
        transaction.state.log.num.partitions = 50
        transaction.state.log.replication.factor = 3
        transaction.state.log.segment.bytes = 104857600
        transactional.id.expiration.ms = 604800000
        unclean.leader.election.enable = false
        zookeeper.connect = 127.0.0.1:45884
        zookeeper.connection.timeout.ms = null
        zookeeper.max.in.flight.requests = 10
        zookeeper.session.timeout.ms = 10000
        zookeeper.set.acl = false
        zookeeper.sync.time.ms = 2000
 (kafka.server.KafkaConfig:346)
[2019-05-25 04:49:13,385] INFO starting (kafka.server.KafkaServer:66)
[2019-05-25 04:49:13,385] INFO Connecting to zookeeper on 127.0.0.1:45884 
(kafka.server.KafkaServer:66)
[2019-05-25 04:49:13,386] INFO [ZooKeeperClient Kafka server] Initializing a 
new session to 127.0.0.1:45884. (kafka.zookeeper.ZooKeeperClient:66)
[2019-05-25 04:49:13,386] INFO Initiating client connection, 
connectString=127.0.0.1:45884 sessionTimeout=10000 
watcher=kafka.zookeeper.ZooKeeperClient$ZooKeeperClientWatcher$@49429620 
(org.apache.zookeeper.ZooKeeper:442)
[2019-05-25 04:49:13,391] INFO [ZooKeeperClient Kafka server] Waiting until 
connected. (kafka.zookeeper.ZooKeeperClient:66)
[2019-05-25 04:49:13,395] INFO Opening socket connection to server 
localhost/127.0.0.1:45884. Will not attempt to authenticate using SASL (unknown 
error) (org.apache.zookeeper.ClientCnxn:1025)
[2019-05-25 04:49:13,396] INFO Socket connection established to 
localhost/127.0.0.1:45884, initiating session 
(org.apache.zookeeper.ClientCnxn:879)
[2019-05-25 04:49:13,399] INFO Accepted socket connection from /127.0.0.1:56622 
(org.apache.zookeeper.server.NIOServerCnxnFactory:222)
[2019-05-25 04:49:13,399] INFO Client attempting to establish new session at 
/127.0.0.1:56622 (org.apache.zookeeper.server.ZooKeeperServer:949)
[2019-05-25 04:49:13,403] INFO Creating new log file: log.1 
(org.apache.zookeeper.server.persistence.FileTxnLog:216)
[2019-05-25 04:49:13,460] INFO Established session 0x100aaf545600000 with 
negotiated timeout 10000 for client /127.0.0.1:56622 
(org.apache.zookeeper.server.ZooKeeperServer:694)
[2019-05-25 04:49:13,462] INFO Session establishment complete on server 
localhost/127.0.0.1:45884, sessionid = 0x100aaf545600000, negotiated timeout = 
10000 (org.apache.zookeeper.ClientCnxn:1299)
[2019-05-25 04:49:13,467] INFO [ZooKeeperClient Kafka server] Connected. 
(kafka.zookeeper.ZooKeeperClient:66)
[2019-05-25 04:49:13,479] INFO Got user-level KeeperException when processing 
sessionid:0x100aaf545600000 type:create cxid:0x2 zxid:0x3 txntype:-1 
reqpath:n/a Error Path:/brokers Error:KeeperErrorCode = NoNode for /brokers 
(org.apache.zookeeper.server.PrepRequestProcessor:653)
[2019-05-25 04:49:13,530] INFO Got user-level KeeperException when processing 
sessionid:0x100aaf545600000 type:create cxid:0x6 zxid:0x7 txntype:-1 
reqpath:n/a Error Path:/config Error:KeeperErrorCode = NoNode for /config 
(org.apache.zookeeper.server.PrepRequestProcessor:653)
[2019-05-25 04:49:13,532] INFO Got user-level KeeperException when processing 
sessionid:0x100aaf545600000 type:create cxid:0x9 zxid:0xa txntype:-1 
reqpath:n/a Error Path:/admin Error:KeeperErrorCode = NoNode for /admin 
(org.apache.zookeeper.server.PrepRequestProcessor:653)
[2019-05-25 04:49:13,555] INFO Got user-level KeeperException when processing 
sessionid:0x100aaf545600000 type:create cxid:0x15 zxid:0x15 txntype:-1 
reqpath:n/a Error Path:/cluster Error:KeeperErrorCode = NoNode for /cluster 
(org.apache.zookeeper.server.PrepRequestProcessor:653)
[2019-05-25 04:49:13,556] INFO Cluster ID = hU85BYcSQp275IVrGRHt3A 
(kafka.server.KafkaServer:66)
[2019-05-25 04:49:13,556] WARN No meta.properties file under dir 
/tmp/junit7423677051290926261/junit6332204664748699761/meta.properties 
(kafka.server.BrokerMetadataCheckpoint:70)
[2019-05-25 04:49:13,559] INFO KafkaConfig values: [... identical to the KafkaConfig values logged at 04:49:13,384 above ...] (kafka.server.KafkaConfig:346)
[2019-05-25 04:49:13,562] INFO KafkaConfig values: [... identical to the KafkaConfig values logged at 04:49:13,384 above ...] (kafka.server.KafkaConfig:346)
[2019-05-25 04:49:13,590] INFO [ThrottledChannelReaper-Fetch]: Starting 
(kafka.server.ClientQuotaManager$ThrottledChannelReaper:66)
[2019-05-25 04:49:13,590] INFO [ThrottledChannelReaper-Request]: Starting 
(kafka.server.ClientQuotaManager$ThrottledChannelReaper:66)
[2019-05-25 04:49:13,590] INFO [ThrottledChannelReaper-Produce]: Starting 
(kafka.server.ClientQuotaManager$ThrottledChannelReaper:66)
[2019-05-25 04:49:13,599] INFO Loading logs. (kafka.log.LogManager:66)
[2019-05-25 04:49:13,600] INFO Logs loading complete in 0 ms. 
(kafka.log.LogManager:66)
[2019-05-25 04:49:13,600] INFO Starting log cleanup with a period of 300000 ms. 
(kafka.log.LogManager:66)
[2019-05-25 04:49:13,602] INFO Starting log flusher with a default period of 
9223372036854775807 ms. (kafka.log.LogManager:66)
[2019-05-25 04:49:13,619] INFO Starting the log cleaner 
(kafka.log.LogCleaner:66)
[2019-05-25 04:49:13,642] INFO [kafka-log-cleaner-thread-0]: Starting 
(kafka.log.LogCleaner:66)
[2019-05-25 04:49:13,698] INFO Awaiting socket connections on localhost:37841. 
(kafka.network.Acceptor:66)
[2019-05-25 04:49:13,739] INFO [SocketServer brokerId=0] Created data-plane 
acceptor and processors for endpoint : 
EndPoint(localhost,0,ListenerName(PLAINTEXT),PLAINTEXT) 
(kafka.network.SocketServer:66)
[2019-05-25 04:49:13,739] INFO [SocketServer brokerId=0] Started 1 acceptor 
threads for data-plane (kafka.network.SocketServer:66)
[2019-05-25 04:49:13,755] INFO [ExpirationReaper-0-Produce]: Starting 
(kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper:66)
[2019-05-25 04:49:13,759] INFO [ExpirationReaper-0-Fetch]: Starting 
(kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper:66)
[2019-05-25 04:49:13,815] INFO [ExpirationReaper-0-DeleteRecords]: Starting 
(kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper:66)
[2019-05-25 04:49:13,819] INFO [ExpirationReaper-0-ElectPreferredLeader]: 
Starting (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper:66)
[2019-05-25 04:49:13,823] INFO [LogDirFailureHandler]: Starting 
(kafka.server.ReplicaManager$LogDirFailureHandler:66)
[2019-05-25 04:49:13,827] INFO Creating /brokers/ids/0 (is it secure? false) 
(kafka.zk.KafkaZkClient:66)
[2019-05-25 04:49:13,838] INFO Stat of the created znode at /brokers/ids/0 is: 
24,24,1558759753829,1558759753829,1,0,0,72245564445556736,190,0,24
 (kafka.zk.KafkaZkClient:66)
[2019-05-25 04:49:13,838] INFO Registered broker 0 at path /brokers/ids/0 with 
addresses: 
ArrayBuffer(EndPoint(localhost,37841,ListenerName(PLAINTEXT),PLAINTEXT)), czxid 
(broker epoch): 24 (kafka.zk.KafkaZkClient:66)
[2019-05-25 04:49:13,839] WARN No meta.properties file under dir 
/tmp/junit7423677051290926261/junit6332204664748699761/meta.properties 
(kafka.server.BrokerMetadataCheckpoint:70)
[2019-05-25 04:49:13,947] INFO [ControllerEventThread controllerId=0] Starting 
(kafka.controller.ControllerEventManager$ControllerEventThread:66)
[2019-05-25 04:49:13,947] INFO [ExpirationReaper-0-topic]: Starting 
(kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper:66)
[2019-05-25 04:49:13,947] INFO [ExpirationReaper-0-Heartbeat]: Starting 
(kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper:66)
[2019-05-25 04:49:13,949] INFO Successfully created /controller_epoch with 
initial epoch 0 (kafka.zk.KafkaZkClient:66)
[2019-05-25 04:49:13,950] INFO [Controller id=0] 0 successfully elected as the 
controller. Epoch incremented to 1 and epoch zk version is now 1 
(kafka.controller.KafkaController:66)
[2019-05-25 04:49:13,950] INFO [Controller id=0] Registering handlers 
(kafka.controller.KafkaController:66)
[2019-05-25 04:49:13,947] INFO [ExpirationReaper-0-Rebalance]: Starting 
(kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper:66)
[2019-05-25 04:49:13,951] INFO [GroupCoordinator 0]: Starting up. 
(kafka.coordinator.group.GroupCoordinator:66)
[2019-05-25 04:49:13,951] INFO [Controller id=0] Deleting log dir event 
notifications (kafka.controller.KafkaController:66)
[2019-05-25 04:49:13,954] INFO [Controller id=0] Deleting isr change 
notifications (kafka.controller.KafkaController:66)
[2019-05-25 04:49:13,955] INFO [Controller id=0] Initializing controller 
context (kafka.controller.KafkaController:66)
[2019-05-25 04:49:13,958] INFO [Controller id=0] Initialized broker epochs 
cache: Map(0 -> 24) (kafka.controller.KafkaController:66)
[2019-05-25 04:49:13,959] INFO [GroupCoordinator 0]: Startup complete. 
(kafka.coordinator.group.GroupCoordinator:66)
[2019-05-25 04:49:13,962] INFO [ProducerId Manager 0]: Acquired new producerId 
block (brokerId:0,blockStartProducerId:0,blockEndProducerId:999) by writing to 
Zk with path version 1 (kafka.coordinator.transaction.ProducerIdManager:66)
[2019-05-25 04:49:13,963] INFO [GroupMetadataManager brokerId=0] Removed 0 
expired offsets in 0 milliseconds. 
(kafka.coordinator.group.GroupMetadataManager:66)
[2019-05-25 04:49:13,972] INFO [TransactionCoordinator id=0] Starting up. 
(kafka.coordinator.transaction.TransactionCoordinator:66)
[2019-05-25 04:49:13,973] INFO [RequestSendThread controllerId=0] Starting 
(kafka.controller.RequestSendThread:66)
[2019-05-25 04:49:13,975] INFO [Controller id=0] Partitions being reassigned: 
Map() (kafka.controller.KafkaController:66)
[2019-05-25 04:49:13,975] INFO [Controller id=0] Currently active brokers in 
the cluster: Set(0) (kafka.controller.KafkaController:66)
[2019-05-25 04:49:13,975] INFO [Controller id=0] Currently shutting brokers in 
the cluster: Set() (kafka.controller.KafkaController:66)
[2019-05-25 04:49:13,975] INFO [Controller id=0] Current list of topics in the 
cluster: Set() (kafka.controller.KafkaController:66)
[2019-05-25 04:49:13,983] INFO [Controller id=0] Fetching topic deletions in 
progress (kafka.controller.KafkaController:66)
[2019-05-25 04:49:13,979] INFO [TransactionCoordinator id=0] Startup complete. 
(kafka.coordinator.transaction.TransactionCoordinator:66)
[2019-05-25 04:49:13,995] INFO [/config/changes-event-process-thread]: Starting 
(kafka.common.ZkNodeChangeNotificationListener$ChangeEventProcessThread:66)
[2019-05-25 04:49:13,995] INFO [Transaction Marker Channel Manager 0]: Starting 
(kafka.coordinator.transaction.TransactionMarkerChannelManager:66)
[2019-05-25 04:49:14,003] INFO [Controller id=0] List of topics to be deleted:  
(kafka.controller.KafkaController:66)
[2019-05-25 04:49:14,003] INFO [Controller id=0] List of topics ineligible for 
deletion:  (kafka.controller.KafkaController:66)
[2019-05-25 04:49:14,003] INFO [Controller id=0] Initializing topic deletion 
manager (kafka.controller.KafkaController:66)
[2019-05-25 04:49:14,003] INFO [Topic Deletion Manager 0] Initializing manager 
with initial deletions: Set(), initial ineligible deletions: Set() 
(kafka.controller.TopicDeletionManager:66)
[2019-05-25 04:49:14,003] INFO [Controller id=0] Sending update metadata 
request (kafka.controller.KafkaController:66)
[2019-05-25 04:49:14,004] INFO [ReplicaStateMachine controllerId=0] 
Initializing replica state (kafka.controller.ZkReplicaStateMachine:66)
[2019-05-25 04:49:14,004] INFO [ReplicaStateMachine controllerId=0] Triggering 
online replica state changes (kafka.controller.ZkReplicaStateMachine:66)
[2019-05-25 04:49:14,004] INFO [ReplicaStateMachine controllerId=0] Triggering 
offline replica state changes (kafka.controller.ZkReplicaStateMachine:66)
[2019-05-25 04:49:14,004] INFO [PartitionStateMachine controllerId=0] 
Initializing partition state (kafka.controller.ZkPartitionStateMachine:66)
[2019-05-25 04:49:14,004] INFO [PartitionStateMachine controllerId=0] 
Triggering online partition state changes 
(kafka.controller.ZkPartitionStateMachine:66)
[2019-05-25 04:49:14,004] INFO [Controller id=0] Ready to serve as the new 
controller with epoch 1 (kafka.controller.KafkaController:66)
[2019-05-25 04:49:14,004] INFO [Controller id=0] Removing partitions Set() from 
the list of reassigned partitions in zookeeper 
(kafka.controller.KafkaController:66)
[2019-05-25 04:49:14,004] INFO [Controller id=0] No more partitions need to be 
reassigned. Deleting zk path /admin/reassign_partitions 
(kafka.controller.KafkaController:66)
[2019-05-25 04:49:14,005] INFO [RequestSendThread controllerId=0] Controller 0 
connected to localhost:37841 (id: 0 rack: null) for sending state change 
requests (kafka.controller.RequestSendThread:66)
[2019-05-25 04:49:14,011] INFO [Controller id=0] Partitions undergoing 
preferred replica election:  (kafka.controller.KafkaController:66)
[2019-05-25 04:49:14,011] INFO [Controller id=0] Partitions that completed 
preferred replica election:  (kafka.controller.KafkaController:66)
[2019-05-25 04:49:14,011] INFO [Controller id=0] Skipping preferred replica 
election for partitions due to topic deletion:  
(kafka.controller.KafkaController:66)
[2019-05-25 04:49:14,012] INFO [Controller id=0] Resuming preferred replica 
election for partitions:  (kafka.controller.KafkaController:66)
[2019-05-25 04:49:14,012] INFO [Controller id=0] Starting preferred replica 
leader election for partitions  (kafka.controller.KafkaController:66)
[2019-05-25 04:49:14,013] INFO Got user-level KeeperException when processing 
sessionid:0x100aaf545600000 type:multi cxid:0x38 zxid:0x1c txntype:-1 
reqpath:n/a aborting remaining multi ops. Error 
Path:/admin/preferred_replica_election Error:KeeperErrorCode = NoNode for 
/admin/preferred_replica_election 
(org.apache.zookeeper.server.PrepRequestProcessor:596)
[2019-05-25 04:49:14,015] INFO [SocketServer brokerId=0] Started data-plane 
processors for 1 acceptors (kafka.network.SocketServer:66)
[2019-05-25 04:49:14,015] INFO Kafka version: 5.3.0-ccs-SNAPSHOT 
(org.apache.kafka.common.utils.AppInfoParser:117)
[2019-05-25 04:49:14,015] INFO Kafka commitId: a9f6e87b7820377c 
(org.apache.kafka.common.utils.AppInfoParser:118)
[2019-05-25 04:49:14,015] INFO Kafka startTimeMs: 1558759753345 
(org.apache.kafka.common.utils.AppInfoParser:119)
[2019-05-25 04:49:14,015] INFO [KafkaServer id=0] started 
(kafka.server.KafkaServer:66)
[2019-05-25 04:49:14,016] INFO AdminClientConfig values: 
        bootstrap.servers = [localhost:37841]
        client.dns.lookup = default
        client.id = 
        connections.max.idle.ms = 300000
        metadata.max.age.ms = 300000
        metric.reporters = []
        metrics.num.samples = 2
        metrics.recording.level = INFO
        metrics.sample.window.ms = 30000
        receive.buffer.bytes = 65536
        reconnect.backoff.max.ms = 1000
        reconnect.backoff.ms = 50
        request.timeout.ms = 120000
        retries = 5
        retry.backoff.ms = 100
        sasl.client.callback.handler.class = null
        sasl.jaas.config = null
        sasl.kerberos.kinit.cmd = /usr/bin/kinit
        sasl.kerberos.min.time.before.relogin = 60000
        sasl.kerberos.service.name = null
        sasl.kerberos.ticket.renew.jitter = 0.05
        sasl.kerberos.ticket.renew.window.factor = 0.8
        sasl.login.callback.handler.class = null
        sasl.login.class = null
        sasl.login.refresh.buffer.seconds = 300
        sasl.login.refresh.min.period.seconds = 60
        sasl.login.refresh.window.factor = 0.8
        sasl.login.refresh.window.jitter = 0.05
        sasl.mechanism = GSSAPI
        security.protocol = PLAINTEXT
        send.buffer.bytes = 131072
        ssl.cipher.suites = null
        ssl.enabled.protocols = [TLSv1.2, TLSv1.1, TLSv1]
        ssl.endpoint.identification.algorithm = https
        ssl.key.password = null
        ssl.keymanager.algorithm = SunX509
        ssl.keystore.location = null
        ssl.keystore.password = null
        ssl.keystore.type = JKS
        ssl.protocol = TLS
        ssl.provider = null
        ssl.secure.random.implementation = null
        ssl.trustmanager.algorithm = PKIX
        ssl.truststore.location = null
        ssl.truststore.password = null
        ssl.truststore.type = JKS
 (org.apache.kafka.clients.admin.AdminClientConfig:346)
[2019-05-25 04:49:14,017] INFO [Controller id=0] Starting the controller 
scheduler (kafka.controller.KafkaController:66)
[2019-05-25 04:49:14,018] INFO Kafka version: 5.3.0-ccs-SNAPSHOT 
(org.apache.kafka.common.utils.AppInfoParser:117)
[2019-05-25 04:49:14,018] INFO Kafka commitId: a9f6e87b7820377c 
(org.apache.kafka.common.utils.AppInfoParser:118)
[2019-05-25 04:49:14,018] INFO Kafka startTimeMs: 1558759754018 
(org.apache.kafka.common.utils.AppInfoParser:119)
[2019-05-25 04:49:14,044] INFO Creating topic stream-one-0 with configuration 
{} and initial partition assignment Map(2 -> ArrayBuffer(0), 1 -> 
ArrayBuffer(0), 0 -> ArrayBuffer(0)) (kafka.zk.AdminZkClient:66)
[2019-05-25 04:49:14,044] INFO Got user-level KeeperException when processing 
sessionid:0x100aaf545600000 type:setData cxid:0x3e zxid:0x1d txntype:-1 
reqpath:n/a Error Path:/config/topics/stream-one-0 Error:KeeperErrorCode = 
NoNode for /config/topics/stream-one-0 
(org.apache.zookeeper.server.PrepRequestProcessor:653)
[2019-05-25 04:49:14,070] INFO [Controller id=0] New topics: 
[Set(stream-one-0)], deleted topics: [Set()], new partition replica assignment 
[Map(stream-one-0-2 -> Vector(0), stream-one-0-1 -> Vector(0), stream-one-0-0 
-> Vector(0))] (kafka.controller.KafkaController:66)
[2019-05-25 04:49:14,070] INFO [Controller id=0] New partition creation 
callback for stream-one-0-2,stream-one-0-1,stream-one-0-0 
(kafka.controller.KafkaController:66)
[2019-05-25 04:49:14,097] INFO [ReplicaFetcherManager on broker 0] Removed 
fetcher for partitions Set(stream-one-0-1, stream-one-0-2, stream-one-0-0) 
(kafka.server.ReplicaFetcherManager:66)
[2019-05-25 04:49:14,113] INFO [Log partition=stream-one-0-1, 
dir=/tmp/junit7423677051290926261/junit6332204664748699761] Loading producer 
state till offset 0 with message format version 2 (kafka.log.Log:66)
[2019-05-25 04:49:14,114] INFO [Log partition=stream-one-0-1, 
dir=/tmp/junit7423677051290926261/junit6332204664748699761] Completed load of 
log with 1 segments, log start offset 0 and log end offset 0 in 0 ms 
(kafka.log.Log:66)
[2019-05-25 04:49:14,115] INFO Created log for partition stream-one-0-1 in 
/tmp/junit7423677051290926261/junit6332204664748699761 with properties 
{compression.type -> producer, message.downconversion.enable -> true, 
min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> [delete], 
flush.ms -> 9223372036854775807, segment.bytes -> 1073741824, retention.ms -> 
604800000, flush.messages -> 9223372036854775807, message.format.version -> 
2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 
9223372036854775807, max.message.bytes -> 1000000, min.compaction.lag.ms -> 0, 
message.timestamp.type -> CreateTime, preallocate -> false, 
min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, 
unclean.leader.election.enable -> false, retention.bytes -> -1, 
delete.retention.ms -> 86400000, segment.ms -> 604800000, 
message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes 
-> 10485760}. (kafka.log.LogManager:66)
[2019-05-25 04:49:14,115] INFO [Partition stream-one-0-1 broker=0] No 
checkpointed highwatermark is found for partition stream-one-0-1 
(kafka.cluster.Partition:66)
[2019-05-25 04:49:14,115] INFO Replica loaded for partition stream-one-0-1 with 
initial high watermark 0 (kafka.cluster.Replica:66)
[2019-05-25 04:49:14,115] INFO [Partition stream-one-0-1 broker=0] 
stream-one-0-1 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch 
was: -1 (kafka.cluster.Partition:66)
[2019-05-25 04:49:14,178] INFO [Log partition=stream-one-0-2, 
dir=/tmp/junit7423677051290926261/junit6332204664748699761] Loading producer 
state till offset 0 with message format version 2 (kafka.log.Log:66)
[2019-05-25 04:49:14,179] INFO [Log partition=stream-one-0-2, 
dir=/tmp/junit7423677051290926261/junit6332204664748699761] Completed load of 
log with 1 segments, log start offset 0 and log end offset 0 in 0 ms 
(kafka.log.Log:66)
[2019-05-25 04:49:14,180] INFO Created log for partition stream-one-0-2 in 
/tmp/junit7423677051290926261/junit6332204664748699761 with properties 
{compression.type -> producer, message.downconversion.enable -> true, 
min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> [delete], 
flush.ms -> 9223372036854775807, segment.bytes -> 1073741824, retention.ms -> 
604800000, flush.messages -> 9223372036854775807, message.format.version -> 
2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 
9223372036854775807, max.message.bytes -> 1000000, min.compaction.lag.ms -> 0, 
message.timestamp.type -> CreateTime, preallocate -> false, 
min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, 
unclean.leader.election.enable -> false, retention.bytes -> -1, 
delete.retention.ms -> 86400000, segment.ms -> 604800000, 
message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes 
-> 10485760}. (kafka.log.LogManager:66)
[2019-05-25 04:49:14,180] INFO [Partition stream-one-0-2 broker=0] No 
checkpointed highwatermark is found for partition stream-one-0-2 
(kafka.cluster.Partition:66)
[2019-05-25 04:49:14,180] INFO Replica loaded for partition stream-one-0-2 with 
initial high watermark 0 (kafka.cluster.Replica:66)
[2019-05-25 04:49:14,180] INFO [Partition stream-one-0-2 broker=0] 
stream-one-0-2 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch 
was: -1 (kafka.cluster.Partition:66)
[2019-05-25 04:49:14,338] INFO [Log partition=stream-one-0-0, 
dir=/tmp/junit7423677051290926261/junit6332204664748699761] Loading producer 
state till offset 0 with message format version 2 (kafka.log.Log:66)
[2019-05-25 04:49:14,339] INFO [Log partition=stream-one-0-0, 
dir=/tmp/junit7423677051290926261/junit6332204664748699761] Completed load of 
log with 1 segments, log start offset 0 and log end offset 0 in 0 ms 
(kafka.log.Log:66)
[2019-05-25 04:49:14,340] INFO Created log for partition stream-one-0-0 in 
/tmp/junit7423677051290926261/junit6332204664748699761 with properties 
{compression.type -> producer, message.downconversion.enable -> true, 
min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> [delete], 
flush.ms -> 9223372036854775807, segment.bytes -> 1073741824, retention.ms -> 
604800000, flush.messages -> 9223372036854775807, message.format.version -> 
2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 
9223372036854775807, max.message.bytes -> 1000000, min.compaction.lag.ms -> 0, 
message.timestamp.type -> CreateTime, preallocate -> false, 
min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, 
unclean.leader.election.enable -> false, retention.bytes -> -1, 
delete.retention.ms -> 86400000, segment.ms -> 604800000, 
message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes 
-> 10485760}. (kafka.log.LogManager:66)
[2019-05-25 04:49:14,340] INFO [Partition stream-one-0-0 broker=0] No 
checkpointed highwatermark is found for partition stream-one-0-0 
(kafka.cluster.Partition:66)
[2019-05-25 04:49:14,341] INFO Replica loaded for partition stream-one-0-0 with 
initial high watermark 0 (kafka.cluster.Replica:66)
[2019-05-25 04:49:14,347] INFO [Partition stream-one-0-0 broker=0] 
stream-one-0-0 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch 
was: -1 (kafka.cluster.Partition:66)
[2019-05-25 04:49:14,402] INFO AdminClientConfig values: [... identical to the AdminClientConfig values logged at 04:49:14,016 above ...] (org.apache.kafka.clients.admin.AdminClientConfig:346)
[2019-05-25 04:49:14,405] INFO Kafka version: 5.3.0-ccs-SNAPSHOT 
(org.apache.kafka.common.utils.AppInfoParser:117)
[2019-05-25 04:49:14,405] INFO Kafka commitId: a9f6e87b7820377c 
(org.apache.kafka.common.utils.AppInfoParser:118)
[2019-05-25 04:49:14,406] INFO Kafka startTimeMs: 1558759754405 
(org.apache.kafka.common.utils.AppInfoParser:119)
[2019-05-25 04:49:14,428] INFO Creating topic user-sessions-0 with 
configuration {} and initial partition assignment Map(0 -> ArrayBuffer(0)) 
(kafka.zk.AdminZkClient:66)
[2019-05-25 04:49:14,429] INFO Got user-level KeeperException when processing 
sessionid:0x100aaf545600000 type:setData cxid:0x4e zxid:0x27 txntype:-1 
reqpath:n/a Error Path:/config/topics/user-sessions-0 Error:KeeperErrorCode = 
NoNode for /config/topics/user-sessions-0 
(org.apache.zookeeper.server.PrepRequestProcessor:653)
[2019-05-25 04:49:14,432] INFO [Controller id=0] New topics: 
[Set(user-sessions-0)], deleted topics: [Set()], new partition replica 
assignment [Map(user-sessions-0-0 -> Vector(0))] 
(kafka.controller.KafkaController:66)
[2019-05-25 04:49:14,432] INFO [Controller id=0] New partition creation 
callback for user-sessions-0-0 (kafka.controller.KafkaController:66)
[2019-05-25 04:49:14,444] INFO [ReplicaFetcherManager on broker 0] Removed 
fetcher for partitions Set(user-sessions-0-0) 
(kafka.server.ReplicaFetcherManager:66)
[2019-05-25 04:49:14,456] INFO [Log partition=user-sessions-0-0, 
dir=/tmp/junit7423677051290926261/junit6332204664748699761] Loading producer 
state till offset 0 with message format version 2 (kafka.log.Log:66)
[2019-05-25 04:49:14,457] INFO [Log partition=user-sessions-0-0, 
dir=/tmp/junit7423677051290926261/junit6332204664748699761] Completed load of 
log with 1 segments, log start offset 0 and log end offset 0 in 0 ms 
(kafka.log.Log:66)
[2019-05-25 04:49:14,458] INFO Created log for partition user-sessions-0-0 in 
/tmp/junit7423677051290926261/junit6332204664748699761 with properties 
{compression.type -> producer, message.downconversion.enable -> true, 
min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> [delete], 
flush.ms -> 9223372036854775807, segment.bytes -> 1073741824, retention.ms -> 
604800000, flush.messages -> 9223372036854775807, message.format.version -> 
2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 
9223372036854775807, max.message.bytes -> 1000000, min.compaction.lag.ms -> 0, 
message.timestamp.type -> CreateTime, preallocate -> false, 
min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, 
unclean.leader.election.enable -> false, retention.bytes -> -1, 
delete.retention.ms -> 86400000, segment.ms -> 604800000, 
message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes 
-> 10485760}. (kafka.log.LogManager:66)
[2019-05-25 04:49:14,458] INFO [Partition user-sessions-0-0 broker=0] No 
checkpointed highwatermark is found for partition user-sessions-0-0 
(kafka.cluster.Partition:66)
[2019-05-25 04:49:14,458] INFO Replica loaded for partition user-sessions-0-0 
with initial high watermark 0 (kafka.cluster.Replica:66)
[2019-05-25 04:49:14,458] INFO [Partition user-sessions-0-0 broker=0] 
user-sessions-0-0 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch 
was: -1 (kafka.cluster.Partition:66)
[2019-05-25 04:49:14,761] INFO AdminClientConfig values: 
        bootstrap.servers = [localhost:37841]
        client.dns.lookup = default
        client.id = 
        connections.max.idle.ms = 300000
        metadata.max.age.ms = 300000
        metric.reporters = []
        metrics.num.samples = 2
        metrics.recording.level = INFO
        metrics.sample.window.ms = 30000
        receive.buffer.bytes = 65536
        reconnect.backoff.max.ms = 1000
        reconnect.backoff.ms = 50
        request.timeout.ms = 120000
        retries = 5
        retry.backoff.ms = 100
        sasl.client.callback.handler.class = null
        sasl.jaas.config = null
        sasl.kerberos.kinit.cmd = /usr/bin/kinit
        sasl.kerberos.min.time.before.relogin = 60000
        sasl.kerberos.service.name = null
        sasl.kerberos.ticket.renew.jitter = 0.05
        sasl.kerberos.ticket.renew.window.factor = 0.8
        sasl.login.callback.handler.class = null
        sasl.login.class = null
        sasl.login.refresh.buffer.seconds = 300
        sasl.login.refresh.min.period.seconds = 60
        sasl.login.refresh.window.factor = 0.8
        sasl.login.refresh.window.jitter = 0.05
        sasl.mechanism = GSSAPI
        security.protocol = PLAINTEXT
        send.buffer.bytes = 131072
        ssl.cipher.suites = null
        ssl.enabled.protocols = [TLSv1.2, TLSv1.1,
...[truncated 815783 chars]...
le -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> 
compact,delete, flush.ms -> 9223372036854775807, segment.bytes -> 1073741824, 
retention.ms -> 172800000, flush.messages -> 9223372036854775807, 
message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, 
max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000000, 
min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate 
-> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, 
unclean.leader.election.enable -> false, retention.bytes -> -1, 
delete.retention.ms -> 86400000, segment.ms -> 604800000, 
message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes 
-> 10485760}. (kafka.log.LogManager:66)
[2019-05-25 04:51:42,415] INFO [Partition 
kgrouped-stream-test-10-KSTREAM-AGGREGATE-STATE-STORE-0000000002-changelog-0 
broker=0] No checkpointed highwatermark is found for partition 
kgrouped-stream-test-10-KSTREAM-AGGREGATE-STATE-STORE-0000000002-changelog-0 
(kafka.cluster.Partition:66)
[2019-05-25 04:51:42,415] INFO Replica loaded for partition 
kgrouped-stream-test-10-KSTREAM-AGGREGATE-STATE-STORE-0000000002-changelog-0 
with initial high watermark 0 (kafka.cluster.Replica:66)
[2019-05-25 04:51:42,416] INFO [Partition 
kgrouped-stream-test-10-KSTREAM-AGGREGATE-STATE-STORE-0000000002-changelog-0 
broker=0] 
kgrouped-stream-test-10-KSTREAM-AGGREGATE-STATE-STORE-0000000002-changelog-0 
starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 
(kafka.cluster.Partition:66)
[2019-05-25 04:51:42,439] INFO stream-thread 
[kgrouped-stream-test-10-5b09b3d5-6033-40e0-a2bb-97f38bcffae2-StreamThread-1-consumer]
 Assigned tasks to clients as 
{5b09b3d5-6033-40e0-a2bb-97f38bcffae2=[activeTasks: ([0_0, 0_1, 1_0, 0_2, 1_1, 
1_2]) standbyTasks: ([]) assignedTasks: ([0_0, 0_1, 1_0, 0_2, 1_1, 1_2]) 
prevActiveTasks: ([]) prevStandbyTasks: ([]) prevAssignedTasks: ([]) capacity: 
1]}. (org.apache.kafka.streams.processor.internals.StreamsPartitionAssignor:636)
[2019-05-25 04:51:42,443] INFO [Consumer 
clientId=kgrouped-stream-test-10-5b09b3d5-6033-40e0-a2bb-97f38bcffae2-StreamThread-1-consumer,
 groupId=kgrouped-stream-test-10] The following not-subscribed topics are 
assigned, and their metadata will be fetched from the brokers: 
[kgrouped-stream-test-10-KSTREAM-AGGREGATE-STATE-STORE-0000000002-repartition] 
(org.apache.kafka.clients.consumer.internals.ConsumerCoordinator:448)
[2019-05-25 04:51:42,447] INFO [GroupCoordinator 0]: Assignment received from 
leader for group kgrouped-stream-test-10 for generation 1 
(kafka.coordinator.group.GroupCoordinator:66)
[2019-05-25 04:51:42,449] INFO [Consumer 
clientId=kgrouped-stream-test-10-5b09b3d5-6033-40e0-a2bb-97f38bcffae2-StreamThread-1-consumer,
 groupId=kgrouped-stream-test-10] Successfully joined group with generation 1 
(org.apache.kafka.clients.consumer.internals.AbstractCoordinator:469)
[2019-05-25 04:51:42,460] INFO [Consumer 
clientId=kgrouped-stream-test-10-5b09b3d5-6033-40e0-a2bb-97f38bcffae2-StreamThread-1-consumer,
 groupId=kgrouped-stream-test-10] Setting newly assigned partitions: 
stream-one-9-1, 
kgrouped-stream-test-10-KSTREAM-AGGREGATE-STATE-STORE-0000000002-repartition-2, 
stream-one-9-2, 
kgrouped-stream-test-10-KSTREAM-AGGREGATE-STATE-STORE-0000000002-repartition-0, 
stream-one-9-0, 
kgrouped-stream-test-10-KSTREAM-AGGREGATE-STATE-STORE-0000000002-repartition-1 
(org.apache.kafka.clients.consumer.internals.ConsumerCoordinator:283)
[2019-05-25 04:51:42,460] INFO stream-thread 
[kgrouped-stream-test-10-5b09b3d5-6033-40e0-a2bb-97f38bcffae2-StreamThread-1] 
State transition from PARTITIONS_REVOKED to PARTITIONS_ASSIGNED 
(org.apache.kafka.streams.processor.internals.StreamThread:212)
[2019-05-25 04:51:42,464] INFO stream-thread 
[kgrouped-stream-test-10-5b09b3d5-6033-40e0-a2bb-97f38bcffae2-StreamThread-1] 
partition assignment took 4 ms.
        current active tasks: [0_0, 0_1, 1_0, 0_2, 1_1, 1_2]
        current standby tasks: []
        previous active tasks: []
 (org.apache.kafka.streams.processor.internals.StreamThread:288)
[2019-05-25 04:51:42,483] INFO [Consumer 
clientId=kgrouped-stream-test-10-5b09b3d5-6033-40e0-a2bb-97f38bcffae2-StreamThread-1-consumer,
 groupId=kgrouped-stream-test-10] Found no committed offset for partition 
stream-one-9-1 
(org.apache.kafka.clients.consumer.internals.ConsumerCoordinator:983)
[2019-05-25 04:51:42,483] INFO [Consumer 
clientId=kgrouped-stream-test-10-5b09b3d5-6033-40e0-a2bb-97f38bcffae2-StreamThread-1-consumer,
 groupId=kgrouped-stream-test-10] Found no committed offset for partition 
kgrouped-stream-test-10-KSTREAM-AGGREGATE-STATE-STORE-0000000002-repartition-2 
(org.apache.kafka.clients.consumer.internals.ConsumerCoordinator:983)
[2019-05-25 04:51:42,483] INFO [Consumer 
clientId=kgrouped-stream-test-10-5b09b3d5-6033-40e0-a2bb-97f38bcffae2-StreamThread-1-consumer,
 groupId=kgrouped-stream-test-10] Found no committed offset for partition 
stream-one-9-2 
(org.apache.kafka.clients.consumer.internals.ConsumerCoordinator:983)
[2019-05-25 04:51:42,483] INFO [Consumer 
clientId=kgrouped-stream-test-10-5b09b3d5-6033-40e0-a2bb-97f38bcffae2-StreamThread-1-consumer,
 groupId=kgrouped-stream-test-10] Found no committed offset for partition 
kgrouped-stream-test-10-KSTREAM-AGGREGATE-STATE-STORE-0000000002-repartition-0 
(org.apache.kafka.clients.consumer.internals.ConsumerCoordinator:983)
[2019-05-25 04:51:42,483] INFO [Consumer 
clientId=kgrouped-stream-test-10-5b09b3d5-6033-40e0-a2bb-97f38bcffae2-StreamThread-1-consumer,
 groupId=kgrouped-stream-test-10] Found no committed offset for partition 
stream-one-9-0 
(org.apache.kafka.clients.consumer.internals.ConsumerCoordinator:983)
[2019-05-25 04:51:42,483] INFO [Consumer 
clientId=kgrouped-stream-test-10-5b09b3d5-6033-40e0-a2bb-97f38bcffae2-StreamThread-1-consumer,
 groupId=kgrouped-stream-test-10] Found no committed offset for partition 
kgrouped-stream-test-10-KSTREAM-AGGREGATE-STATE-STORE-0000000002-repartition-1 
(org.apache.kafka.clients.consumer.internals.ConsumerCoordinator:983)
[2019-05-25 04:51:42,488] INFO [Consumer 
clientId=kgrouped-stream-test-10-5b09b3d5-6033-40e0-a2bb-97f38bcffae2-StreamThread-1-consumer,
 groupId=kgrouped-stream-test-10] Resetting offset for partition 
kgrouped-stream-test-10-KSTREAM-AGGREGATE-STATE-STORE-0000000002-repartition-2 
to offset FetchPosition{offset=0, offsetEpoch=Optional[0], 
currentLeader=LeaderAndEpoch{leader=localhost:37841 (id: 0 rack: null), 
epoch=0}}. (org.apache.kafka.clients.consumer.internals.Fetcher:679)
[2019-05-25 04:51:42,488] INFO [Consumer 
clientId=kgrouped-stream-test-10-5b09b3d5-6033-40e0-a2bb-97f38bcffae2-StreamThread-1-consumer,
 groupId=kgrouped-stream-test-10] Resetting offset for partition stream-one-9-1 
to offset FetchPosition{offset=0, offsetEpoch=Optional[0], 
currentLeader=LeaderAndEpoch{leader=localhost:37841 (id: 0 rack: null), 
epoch=0}}. (org.apache.kafka.clients.consumer.internals.Fetcher:679)
[2019-05-25 04:51:42,488] INFO [Consumer 
clientId=kgrouped-stream-test-10-5b09b3d5-6033-40e0-a2bb-97f38bcffae2-StreamThread-1-consumer,
 groupId=kgrouped-stream-test-10] Resetting offset for partition stream-one-9-2 
to offset FetchPosition{offset=0, offsetEpoch=Optional[0], 
currentLeader=LeaderAndEpoch{leader=localhost:37841 (id: 0 rack: null), 
epoch=0}}. (org.apache.kafka.clients.consumer.internals.Fetcher:679)
[2019-05-25 04:51:42,488] INFO [Consumer 
clientId=kgrouped-stream-test-10-5b09b3d5-6033-40e0-a2bb-97f38bcffae2-StreamThread-1-consumer,
 groupId=kgrouped-stream-test-10] Resetting offset for partition 
kgrouped-stream-test-10-KSTREAM-AGGREGATE-STATE-STORE-0000000002-repartition-0 
to offset FetchPosition{offset=0, offsetEpoch=Optional[0], 
currentLeader=LeaderAndEpoch{leader=localhost:37841 (id: 0 rack: null), 
epoch=0}}. (org.apache.kafka.clients.consumer.internals.Fetcher:679)
[2019-05-25 04:51:42,488] INFO [Consumer 
clientId=kgrouped-stream-test-10-5b09b3d5-6033-40e0-a2bb-97f38bcffae2-StreamThread-1-consumer,
 groupId=kgrouped-stream-test-10] Resetting offset for partition 
kgrouped-stream-test-10-KSTREAM-AGGREGATE-STATE-STORE-0000000002-repartition-1 
to offset FetchPosition{offset=0, offsetEpoch=Optional[0], 
currentLeader=LeaderAndEpoch{leader=localhost:37841 (id: 0 rack: null), 
epoch=0}}. (org.apache.kafka.clients.consumer.internals.Fetcher:679)
[2019-05-25 04:51:42,488] INFO [Consumer 
clientId=kgrouped-stream-test-10-5b09b3d5-6033-40e0-a2bb-97f38bcffae2-StreamThread-1-consumer,
 groupId=kgrouped-stream-test-10] Resetting offset for partition stream-one-9-0 
to offset FetchPosition{offset=0, offsetEpoch=Optional[0], 
currentLeader=LeaderAndEpoch{leader=localhost:37841 (id: 0 rack: null), 
epoch=0}}. (org.apache.kafka.clients.consumer.internals.Fetcher:679)
[2019-05-25 04:51:42,571] INFO [Consumer 
clientId=kgrouped-stream-test-10-5b09b3d5-6033-40e0-a2bb-97f38bcffae2-StreamThread-1-consumer,
 groupId=kgrouped-stream-test-10] Found no committed offset for partition 
kgrouped-stream-test-10-KSTREAM-AGGREGATE-STATE-STORE-0000000002-repartition-0 
(org.apache.kafka.clients.consumer.internals.ConsumerCoordinator:983)
[2019-05-25 04:51:42,580] INFO [Consumer 
clientId=kgrouped-stream-test-10-5b09b3d5-6033-40e0-a2bb-97f38bcffae2-StreamThread-1-consumer,
 groupId=kgrouped-stream-test-10] Found no committed offset for partition 
kgrouped-stream-test-10-KSTREAM-AGGREGATE-STATE-STORE-0000000002-repartition-1 
(org.apache.kafka.clients.consumer.internals.ConsumerCoordinator:983)
[2019-05-25 04:51:42,582] INFO [Consumer 
clientId=kgrouped-stream-test-10-5b09b3d5-6033-40e0-a2bb-97f38bcffae2-StreamThread-1-consumer,
 groupId=kgrouped-stream-test-10] Found no committed offset for partition 
kgrouped-stream-test-10-KSTREAM-AGGREGATE-STATE-STORE-0000000002-repartition-2 
(org.apache.kafka.clients.consumer.internals.ConsumerCoordinator:983)
[2019-05-25 04:51:42,596] INFO [Consumer 
clientId=kgrouped-stream-test-10-5b09b3d5-6033-40e0-a2bb-97f38bcffae2-StreamThread-1-restore-consumer,
 groupId=null] Cluster ID: hU85BYcSQp275IVrGRHt3A 
(org.apache.kafka.clients.Metadata:266)
[2019-05-25 04:51:42,601] INFO [Consumer 
clientId=kgrouped-stream-test-10-5b09b3d5-6033-40e0-a2bb-97f38bcffae2-StreamThread-1-restore-consumer,
 groupId=null] Unsubscribed all topics or patterns and assigned partitions 
(org.apache.kafka.clients.consumer.KafkaConsumer:1068)
[2019-05-25 04:51:42,628] INFO [Consumer 
clientId=kgrouped-stream-test-10-5b09b3d5-6033-40e0-a2bb-97f38bcffae2-StreamThread-1-restore-consumer,
 groupId=null] Unsubscribed all topics or patterns and assigned partitions 
(org.apache.kafka.clients.consumer.KafkaConsumer:1068)
[2019-05-25 04:51:42,628] INFO stream-thread 
[kgrouped-stream-test-10-5b09b3d5-6033-40e0-a2bb-97f38bcffae2-StreamThread-1] 
State transition from PARTITIONS_ASSIGNED to RUNNING 
(org.apache.kafka.streams.processor.internals.StreamThread:212)
[2019-05-25 04:51:42,629] INFO stream-client 
[kgrouped-stream-test-10-5b09b3d5-6033-40e0-a2bb-97f38bcffae2] State transition 
from REBALANCING to RUNNING (org.apache.kafka.streams.KafkaStreams:263)
[2019-05-25 04:51:43,294] INFO Opening store 
KSTREAM-AGGREGATE-STATE-STORE-0000000002.1558742400000 in upgrade mode 
(org.apache.kafka.streams.state.internals.RocksDBTimestampedStore:94)
[2019-05-25 04:51:43,330] WARN Warning: window end time was truncated to 
Long.MAX (org.apache.kafka.streams.state.internals.WindowKeySchema:109)
[2019-05-25 04:51:43,330] WARN Warning: window end time was truncated to 
Long.MAX (org.apache.kafka.streams.state.internals.WindowKeySchema:109)
[2019-05-25 04:51:43,330] WARN Warning: window end time was truncated to 
Long.MAX (org.apache.kafka.streams.state.internals.WindowKeySchema:109)
[2019-05-25 04:51:43,330] WARN Warning: window end time was truncated to 
Long.MAX (org.apache.kafka.streams.state.internals.WindowKeySchema:109)
[2019-05-25 04:51:43,406] INFO [Log 
partition=kgrouped-stream-test-10-KSTREAM-AGGREGATE-STATE-STORE-0000000002-repartition-1,
 dir=/tmp/junit7423677051290926261/junit6332204664748699761] Incrementing log 
start offset to 4 (kafka.log.Log:66)
[2019-05-25 04:51:43,427] INFO [GroupCoordinator 0]: Member 
kgrouped-stream-test-7-a62fa780-5316-449d-bb38-32f411137f03-StreamThread-1-consumer-54baec27-9f92-4a1c-8454-2c1d182db87c
 in group kgrouped-stream-test-7 has failed, removing it from the group 
(kafka.coordinator.group.GroupCoordinator:66)
[2019-05-25 04:51:43,427] INFO [GroupCoordinator 0]: Preparing to rebalance 
group kgrouped-stream-test-7 in state PreparingRebalance with old generation 1 
(__consumer_offsets-7) (reason: removing member 
kgrouped-stream-test-7-a62fa780-5316-449d-bb38-32f411137f03-StreamThread-1-consumer-54baec27-9f92-4a1c-8454-2c1d182db87c
 on heartbeat expiration) (kafka.coordinator.group.GroupCoordinator:66)
[2019-05-25 04:51:43,427] INFO [GroupCoordinator 0]: Group 
kgrouped-stream-test-7 with generation 2 is now empty (__consumer_offsets-7) 
(kafka.coordinator.group.GroupCoordinator:66)
[2019-05-25 04:51:43,509] WARN Warning: window end time was truncated to 
Long.MAX (org.apache.kafka.streams.state.internals.WindowKeySchema:109)
[2019-05-25 04:51:43,509] WARN Warning: window end time was truncated to 
Long.MAX (org.apache.kafka.streams.state.internals.WindowKeySchema:109)
[2019-05-25 04:51:43,510] WARN Warning: window end time was truncated to 
Long.MAX (org.apache.kafka.streams.state.internals.WindowKeySchema:109)
[2019-05-25 04:51:43,510] WARN Warning: window end time was truncated to 
Long.MAX (org.apache.kafka.streams.state.internals.WindowKeySchema:109)
[2019-05-25 04:51:43,510] WARN Warning: window end time was truncated to 
Long.MAX (org.apache.kafka.streams.state.internals.WindowKeySchema:109)
[2019-05-25 04:51:43,510] WARN Warning: window end time was truncated to 
Long.MAX (org.apache.kafka.streams.state.internals.WindowKeySchema:109)
[2019-05-25 04:51:43,510] WARN Warning: window end time was truncated to 
Long.MAX (org.apache.kafka.streams.state.internals.WindowKeySchema:109)
[2019-05-25 04:51:43,511] WARN Warning: window end time was truncated to 
Long.MAX (org.apache.kafka.streams.state.internals.WindowKeySchema:109)
[2019-05-25 04:51:43,516] INFO Opening store 
KSTREAM-AGGREGATE-STATE-STORE-0000000002.1558742400000 in upgrade mode 
(org.apache.kafka.streams.state.internals.RocksDBTimestampedStore:94)
[2019-05-25 04:51:43,562] WARN Warning: window end time was truncated to 
Long.MAX (org.apache.kafka.streams.state.internals.WindowKeySchema:109)
[2019-05-25 04:51:43,563] WARN Warning: window end time was truncated to 
Long.MAX (org.apache.kafka.streams.state.internals.WindowKeySchema:109)
[2019-05-25 04:51:43,563] WARN Warning: window end time was truncated to 
Long.MAX (org.apache.kafka.streams.state.internals.WindowKeySchema:109)
[2019-05-25 04:51:43,574] INFO [Consumer clientId=consumer-15, 
groupId=kgroupedstream-test-10] Member 
consumer-15-7a6ae247-6397-4ed9-b5ee-a0791f0b82b9 sending LeaveGroup request to 
coordinator localhost:37841 (id: 2147483647 rack: null) 
(org.apache.kafka.clients.consumer.internals.AbstractCoordinator:879)
[2019-05-25 04:51:43,575] INFO [GroupCoordinator 0]: Member 
consumer-15-7a6ae247-6397-4ed9-b5ee-a0791f0b82b9 in group 
kgroupedstream-test-10 has left, removing it from the group 
(kafka.coordinator.group.GroupCoordinator:66)
[2019-05-25 04:51:43,575] INFO [GroupCoordinator 0]: Preparing to rebalance 
group kgroupedstream-test-10 in state PreparingRebalance with old generation 1 
(__consumer_offsets-34) (reason: removing member 
consumer-15-7a6ae247-6397-4ed9-b5ee-a0791f0b82b9 on LeaveGroup) 
(kafka.coordinator.group.GroupCoordinator:66)
[2019-05-25 04:51:43,575] INFO [GroupCoordinator 0]: Group 
kgroupedstream-test-10 with generation 2 is now empty (__consumer_offsets-34) 
(kafka.coordinator.group.GroupCoordinator:66)
[2019-05-25 04:51:43,584] INFO ConsumerConfig values: 
        allow.auto.create.topics = true
        auto.commit.interval.ms = 5000
        auto.offset.reset = earliest
        bootstrap.servers = [localhost:37841]
        check.crcs = true
        client.dns.lookup = default
        client.id = 
        client.rack = 
        connections.max.idle.ms = 540000
        default.api.timeout.ms = 60000
        enable.auto.commit = false
        exclude.internal.topics = true
        fetch.max.bytes = 52428800
        fetch.max.wait.ms = 500
        fetch.min.bytes = 1
        group.id = console-consumer-38403
        group.instance.id = null
        heartbeat.interval.ms = 3000
        interceptor.classes = []
        internal.leave.group.on.close = true
        isolation.level = read_uncommitted
        key.deserializer = class 
org.apache.kafka.common.serialization.ByteArrayDeserializer
        max.partition.fetch.bytes = 1048576
        max.poll.interval.ms = 300000
        max.poll.records = 500
        metadata.max.age.ms = 300000
        metric.reporters = []
        metrics.num.samples = 2
        metrics.recording.level = INFO
        metrics.sample.window.ms = 30000
        partition.assignment.strategy = [class 
org.apache.kafka.clients.consumer.RangeAssignor]
        receive.buffer.bytes = 65536
        reconnect.backoff.max.ms = 1000
        reconnect.backoff.ms = 50
        request.timeout.ms = 30000
        retry.backoff.ms = 100
        sasl.client.callback.handler.class = null
        sasl.jaas.config = null
        sasl.kerberos.kinit.cmd = /usr/bin/kinit
        sasl.kerberos.min.time.before.relogin = 60000
        sasl.kerberos.service.name = null
        sasl.kerberos.ticket.renew.jitter = 0.05
        sasl.kerberos.ticket.renew.window.factor = 0.8
        sasl.login.callback.handler.class = null
        sasl.login.class = null
        sasl.login.refresh.buffer.seconds = 300
        sasl.login.refresh.min.period.seconds = 60
        sasl.login.refresh.window.factor = 0.8
        sasl.login.refresh.window.jitter = 0.05
        sasl.mechanism = GSSAPI
        security.protocol = PLAINTEXT
        send.buffer.bytes = 131072
        session.timeout.ms = 10000
        ssl.cipher.suites = null
        ssl.enabled.protocols = [TLSv1.2, TLSv1.1, TLSv1]
        ssl.endpoint.identification.algorithm = https
        ssl.key.password = null
        ssl.keymanager.algorithm = SunX509
        ssl.keystore.location = null
        ssl.keystore.password = null
        ssl.keystore.type = JKS
        ssl.protocol = TLS
        ssl.provider = null
        ssl.secure.random.implementation = null
        ssl.trustmanager.algorithm = PKIX
        ssl.truststore.location = null
        ssl.truststore.password = null
        ssl.truststore.type = JKS
        value.deserializer = class 
org.apache.kafka.common.serialization.ByteArrayDeserializer
 (org.apache.kafka.clients.consumer.ConsumerConfig:346)
[2019-05-25 04:51:43,612] INFO Kafka version: 5.3.0-ccs-SNAPSHOT 
(org.apache.kafka.common.utils.AppInfoParser:117)
[2019-05-25 04:51:43,613] INFO Kafka commitId: a9f6e87b7820377c 
(org.apache.kafka.common.utils.AppInfoParser:118)
[2019-05-25 04:51:43,613] INFO Kafka startTimeMs: 1558759903612 
(org.apache.kafka.common.utils.AppInfoParser:119)
[2019-05-25 04:51:43,613] INFO [Consumer clientId=consumer-16, 
groupId=console-consumer-38403] Subscribed to topic(s): output-9 
(org.apache.kafka.clients.consumer.KafkaConsumer:964)
[2019-05-25 04:51:43,653] INFO [Consumer clientId=consumer-16, 
groupId=console-consumer-38403] Cluster ID: hU85BYcSQp275IVrGRHt3A 
(org.apache.kafka.clients.Metadata:266)
[2019-05-25 04:51:43,653] INFO [Consumer clientId=consumer-16, 
groupId=console-consumer-38403] Discovered group coordinator localhost:37841 
(id: 2147483647 rack: null) 
(org.apache.kafka.clients.consumer.internals.AbstractCoordinator:728)
[2019-05-25 04:51:43,659] INFO [Consumer clientId=consumer-16, 
groupId=console-consumer-38403] Revoking previously assigned partitions [] 
(org.apache.kafka.clients.consumer.internals.ConsumerCoordinator:477)
[2019-05-25 04:51:43,659] INFO [Consumer clientId=consumer-16, 
groupId=console-consumer-38403] (Re-)joining group 
(org.apache.kafka.clients.consumer.internals.AbstractCoordinator:505)
[2019-05-25 04:51:43,660] INFO [Log 
partition=kgrouped-stream-test-10-KSTREAM-AGGREGATE-STATE-STORE-0000000002-repartition-1,
 dir=/tmp/junit7423677051290926261/junit6332204664748699761] Incrementing log 
start offset to 12 (kafka.log.Log:66)
[2019-05-25 04:51:43,665] INFO [Consumer clientId=consumer-16, 
groupId=console-consumer-38403] (Re-)joining group 
(org.apache.kafka.clients.consumer.internals.AbstractCoordinator:505)
[2019-05-25 04:51:43,665] INFO [Log 
partition=kgrouped-stream-test-10-KSTREAM-AGGREGATE-STATE-STORE-0000000002-repartition-0,
 dir=/tmp/junit7423677051290926261/junit6332204664748699761] Incrementing log 
start offset to 3 (kafka.log.Log:66)
[2019-05-25 04:51:43,666] INFO [GroupCoordinator 0]: Preparing to rebalance 
group console-consumer-38403 in state PreparingRebalance with old generation 0 
(__consumer_offsets-29) (reason: Adding new member 
consumer-16-0b12dd4d-85c8-43e1-8f3c-740f62970246 with group instanceid None) 
(kafka.coordinator.group.GroupCoordinator:66)
[2019-05-25 04:51:43,699] INFO [GroupCoordinator 0]: Stabilized group 
console-consumer-38403 generation 1 (__consumer_offsets-29) 
(kafka.coordinator.group.GroupCoordinator:66)
[2019-05-25 04:51:43,710] INFO [GroupCoordinator 0]: Assignment received from 
leader for group console-consumer-38403 for generation 1 
(kafka.coordinator.group.GroupCoordinator:66)
[2019-05-25 04:51:43,712] INFO [Consumer clientId=consumer-16, 
groupId=console-consumer-38403] Successfully joined group with generation 1 
(org.apache.kafka.clients.consumer.internals.AbstractCoordinator:469)
[2019-05-25 04:51:43,713] INFO [Consumer clientId=consumer-16, 
groupId=console-consumer-38403] Setting newly assigned partitions: output-9-0 
(org.apache.kafka.clients.consumer.internals.ConsumerCoordinator:283)
[2019-05-25 04:51:43,719] INFO [Consumer clientId=consumer-16, 
groupId=console-consumer-38403] Found no committed offset for partition 
output-9-0 (org.apache.kafka.clients.consumer.internals.ConsumerCoordinator:983)
[2019-05-25 04:51:43,724] INFO [Consumer clientId=consumer-16, 
groupId=console-consumer-38403] Resetting offset for partition output-9-0 to 
offset FetchPosition{offset=0, offsetEpoch=Optional[0], 
currentLeader=LeaderAndEpoch{leader=localhost:37841 (id: 0 rack: null), 
epoch=0}}. (org.apache.kafka.clients.consumer.internals.Fetcher:679)
[2019-05-25 04:51:43,730] WARN Warning: window end time was truncated to 
Long.MAX (org.apache.kafka.streams.state.internals.WindowKeySchema:109)
[2019-05-25 04:51:43,730] WARN Warning: window end time was truncated to 
Long.MAX (org.apache.kafka.streams.state.internals.WindowKeySchema:109)
[2019-05-25 04:51:43,731] WARN Warning: window end time was truncated to 
Long.MAX (org.apache.kafka.streams.state.internals.WindowKeySchema:109)
[2019-05-25 04:51:43,731] WARN Warning: window end time was truncated to 
Long.MAX (org.apache.kafka.streams.state.internals.WindowKeySchema:109)
[2019-05-25 04:51:43,731] WARN Warning: window end time was truncated to 
Long.MAX (org.apache.kafka.streams.state.internals.WindowKeySchema:109)
[2019-05-25 04:51:43,732] WARN Warning: window end time was truncated to 
Long.MAX (org.apache.kafka.streams.state.internals.WindowKeySchema:109)
[2019-05-25 04:51:43,732] WARN Warning: window end time was truncated to 
Long.MAX (org.apache.kafka.streams.state.internals.WindowKeySchema:109)
[2019-05-25 04:51:43,732] WARN Warning: window end time was truncated to 
Long.MAX (org.apache.kafka.streams.state.internals.WindowKeySchema:109)
[2019-05-25 04:51:43,733] WARN Warning: window end time was truncated to 
Long.MAX (org.apache.kafka.streams.state.internals.WindowKeySchema:109)
[2019-05-25 04:51:43,733] WARN Warning: window end time was truncated to 
Long.MAX (org.apache.kafka.streams.state.internals.WindowKeySchema:109)
[2019-05-25 04:51:43,733] WARN Warning: window end time was truncated to 
Long.MAX (org.apache.kafka.streams.state.internals.WindowKeySchema:109)
[2019-05-25 04:51:43,734] WARN Warning: window end time was truncated to 
Long.MAX (org.apache.kafka.streams.state.internals.WindowKeySchema:109)
[2019-05-25 04:51:43,734] WARN Warning: window end time was truncated to 
Long.MAX (org.apache.kafka.streams.state.internals.WindowKeySchema:109)
[2019-05-25 04:51:43,735] WARN Warning: window end time was truncated to 
Long.MAX (org.apache.kafka.streams.state.internals.WindowKeySchema:109)
[2019-05-25 04:51:43,747] WARN Warning: window end time was truncated to 
Long.MAX (org.apache.kafka.streams.state.internals.WindowKeySchema:109)
[2019-05-25 04:51:43,756] INFO [Consumer clientId=consumer-16, 
groupId=console-consumer-38403] Member 
consumer-16-0b12dd4d-85c8-43e1-8f3c-740f62970246 sending LeaveGroup request to 
coordinator localhost:37841 (id: 2147483647 rack: null) 
(org.apache.kafka.clients.consumer.internals.AbstractCoordinator:879)
[2019-05-25 04:51:43,759] INFO [GroupCoordinator 0]: Member 
consumer-16-0b12dd4d-85c8-43e1-8f3c-740f62970246 in group 
console-consumer-38403 has left, removing it from the group 
(kafka.coordinator.group.GroupCoordinator:66)
[2019-05-25 04:51:43,759] INFO [GroupCoordinator 0]: Preparing to rebalance 
group console-consumer-38403 in state PreparingRebalance with old generation 1 
(__consumer_offsets-29) (reason: removing member 
consumer-16-0b12dd4d-85c8-43e1-8f3c-740f62970246 on LeaveGroup) 
(kafka.coordinator.group.GroupCoordinator:66)
[2019-05-25 04:51:43,760] INFO [GroupCoordinator 0]: Group 
console-consumer-38403 with generation 2 is now empty (__consumer_offsets-29) 
(kafka.coordinator.group.GroupCoordinator:66)
[2019-05-25 04:51:43,766] INFO stream-client 
[kgrouped-stream-test-10-5b09b3d5-6033-40e0-a2bb-97f38bcffae2] State transition 
from RUNNING to PENDING_SHUTDOWN (org.apache.kafka.streams.KafkaStreams:263)
[2019-05-25 04:51:43,787] INFO stream-thread 
[kgrouped-stream-test-10-5b09b3d5-6033-40e0-a2bb-97f38bcffae2-StreamThread-1] 
Informed to shut down 
(org.apache.kafka.streams.processor.internals.StreamThread:1192)
[2019-05-25 04:51:43,787] INFO stream-thread 
[kgrouped-stream-test-10-5b09b3d5-6033-40e0-a2bb-97f38bcffae2-StreamThread-1] 
State transition from RUNNING to PENDING_SHUTDOWN 
(org.apache.kafka.streams.processor.internals.StreamThread:212)
[2019-05-25 04:51:43,856] INFO stream-thread 
[kgrouped-stream-test-10-5b09b3d5-6033-40e0-a2bb-97f38bcffae2-StreamThread-1] 
Shutting down (org.apache.kafka.streams.processor.internals.StreamThread:1206)
[2019-05-25 04:51:43,936] INFO [Consumer 
clientId=kgrouped-stream-test-10-5b09b3d5-6033-40e0-a2bb-97f38bcffae2-StreamThread-1-restore-consumer,
 groupId=null] Unsubscribed all topics or patterns and assigned partitions 
(org.apache.kafka.clients.consumer.KafkaConsumer:1068)
[2019-05-25 04:51:43,936] INFO [Producer 
clientId=kgrouped-stream-test-10-5b09b3d5-6033-40e0-a2bb-97f38bcffae2-StreamThread-1-producer]
 Closing the Kafka producer with timeoutMillis = 9223372036854775807 ms. 
(org.apache.kafka.clients.producer.KafkaProducer:1153)
[2019-05-25 04:51:43,942] INFO stream-thread 
[kgrouped-stream-test-10-5b09b3d5-6033-40e0-a2bb-97f38bcffae2-StreamThread-1] 
State transition from PENDING_SHUTDOWN to DEAD 
(org.apache.kafka.streams.processor.internals.StreamThread:212)
[2019-05-25 04:51:43,950] INFO stream-thread 
[kgrouped-stream-test-10-5b09b3d5-6033-40e0-a2bb-97f38bcffae2-StreamThread-1] 
Shutdown complete 
(org.apache.kafka.streams.processor.internals.StreamThread:1226)
[2019-05-25 04:51:43,952] INFO stream-client 
[kgrouped-stream-test-10-5b09b3d5-6033-40e0-a2bb-97f38bcffae2] State transition 
from PENDING_SHUTDOWN to NOT_RUNNING (org.apache.kafka.streams.KafkaStreams:263)
[2019-05-25 04:51:43,952] INFO stream-client 
[kgrouped-stream-test-10-5b09b3d5-6033-40e0-a2bb-97f38bcffae2] Streams client 
stopped completely (org.apache.kafka.streams.KafkaStreams:898)
[2019-05-25 04:51:43,959] INFO [KafkaServer id=0] shutting down 
(kafka.server.KafkaServer:66)
[2019-05-25 04:51:43,960] INFO [KafkaServer id=0] Starting controlled shutdown 
(kafka.server.KafkaServer:66)
[2019-05-25 04:51:43,967] INFO [Controller id=0] Shutting down broker 0 
(kafka.controller.KafkaController:66)
[2019-05-25 04:51:43,977] INFO [KafkaServer id=0] Controlled shutdown succeeded 
(kafka.server.KafkaServer:66)
[2019-05-25 04:51:43,979] INFO [/config/changes-event-process-thread]: Shutting 
down (kafka.common.ZkNodeChangeNotificationListener$ChangeEventProcessThread:66)
[2019-05-25 04:51:43,979] INFO [/config/changes-event-process-thread]: Stopped 
(kafka.common.ZkNodeChangeNotificationListener$ChangeEventProcessThread:66)
[2019-05-25 04:51:43,979] INFO [/config/changes-event-process-thread]: Shutdown 
completed 
(kafka.common.ZkNodeChangeNotificationListener$ChangeEventProcessThread:66)
[2019-05-25 04:51:43,980] INFO [SocketServer brokerId=0] Stopping socket server 
request processors (kafka.network.SocketServer:66)
[2019-05-25 04:51:43,983] INFO [SocketServer brokerId=0] Stopped socket server 
request processors (kafka.network.SocketServer:66)
[2019-05-25 04:51:43,983] INFO [data-plane Kafka Request Handler on Broker 0], 
shutting down (kafka.server.KafkaRequestHandlerPool:66)
[2019-05-25 04:51:43,985] INFO [data-plane Kafka Request Handler on Broker 0], 
shut down completely (kafka.server.KafkaRequestHandlerPool:66)
[2019-05-25 04:51:43,991] INFO [KafkaApi-0] Shutdown complete. 
(kafka.server.KafkaApis:66)
[2019-05-25 04:51:43,991] INFO [ExpirationReaper-0-topic]: Shutting down 
(kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper:66)
[2019-05-25 04:51:44,134] INFO [ExpirationReaper-0-topic]: Stopped 
(kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper:66)
[2019-05-25 04:51:44,135] INFO [ExpirationReaper-0-topic]: Shutdown completed 
(kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper:66)
[2019-05-25 04:51:44,135] INFO [TransactionCoordinator id=0] Shutting down. 
(kafka.coordinator.transaction.TransactionCoordinator:66)
[2019-05-25 04:51:44,139] INFO [ProducerId Manager 0]: Shutdown complete: last 
producerId assigned 0 (kafka.coordinator.transaction.ProducerIdManager:66)
[2019-05-25 04:51:44,139] INFO [Transaction State Manager 0]: Shutdown complete 
(kafka.coordinator.transaction.TransactionStateManager:66)
[2019-05-25 04:51:44,139] INFO [Transaction Marker Channel Manager 0]: Shutting 
down (kafka.coordinator.transaction.TransactionMarkerChannelManager:66)
[2019-05-25 04:51:44,140] INFO [Transaction Marker Channel Manager 0]: Stopped 
(kafka.coordinator.transaction.TransactionMarkerChannelManager:66)
[2019-05-25 04:51:44,140] INFO [Transaction Marker Channel Manager 0]: Shutdown 
completed (kafka.coordinator.transaction.TransactionMarkerChannelManager:66)
[2019-05-25 04:51:44,141] INFO [TransactionCoordinator id=0] Shutdown complete. 
(kafka.coordinator.transaction.TransactionCoordinator:66)
[2019-05-25 04:51:44,142] INFO [GroupCoordinator 0]: Shutting down. 
(kafka.coordinator.group.GroupCoordinator:66)
[2019-05-25 04:51:44,146] INFO [ExpirationReaper-0-Heartbeat]: Shutting down 
(kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper:66)
[2019-05-25 04:51:44,231] INFO [ExpirationReaper-0-Heartbeat]: Stopped 
(kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper:66)
[2019-05-25 04:51:44,231] INFO [ExpirationReaper-0-Heartbeat]: Shutdown 
completed (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper:66)
[2019-05-25 04:51:44,232] INFO [ExpirationReaper-0-Rebalance]: Shutting down 
(kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper:66)
[2019-05-25 04:51:44,268] INFO [ExpirationReaper-0-Rebalance]: Stopped 
(kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper:66)
[2019-05-25 04:51:44,268] INFO [ExpirationReaper-0-Rebalance]: Shutdown 
completed (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper:66)
[2019-05-25 04:51:44,268] INFO [GroupCoordinator 0]: Shutdown complete. 
(kafka.coordinator.group.GroupCoordinator:66)
[2019-05-25 04:51:44,268] INFO [ReplicaManager broker=0] Shutting down 
(kafka.server.ReplicaManager:66)
[2019-05-25 04:51:44,269] INFO [LogDirFailureHandler]: Shutting down 
(kafka.server.ReplicaManager$LogDirFailureHandler:66)
[2019-05-25 04:51:44,269] INFO [LogDirFailureHandler]: Stopped 
(kafka.server.ReplicaManager$LogDirFailureHandler:66)
[2019-05-25 04:51:44,269] INFO [LogDirFailureHandler]: Shutdown completed 
(kafka.server.ReplicaManager$LogDirFailureHandler:66)
[2019-05-25 04:51:44,269] INFO [ReplicaFetcherManager on broker 0] shutting 
down (kafka.server.ReplicaFetcherManager:66)
[2019-05-25 04:51:44,270] INFO [ReplicaFetcherManager on broker 0] shutdown 
completed (kafka.server.ReplicaFetcherManager:66)
[2019-05-25 04:51:44,270] INFO [ReplicaAlterLogDirsManager on broker 0] 
shutting down (kafka.server.ReplicaAlterLogDirsManager:66)
[2019-05-25 04:51:44,270] INFO [ReplicaAlterLogDirsManager on broker 0] 
shutdown completed (kafka.server.ReplicaAlterLogDirsManager:66)
[2019-05-25 04:51:44,270] INFO [ExpirationReaper-0-Fetch]: Shutting down 
(kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper:66)
[2019-05-25 04:51:44,441] INFO [ExpirationReaper-0-Fetch]: Stopped 
(kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper:66)
[2019-05-25 04:51:44,441] INFO [ExpirationReaper-0-Fetch]: Shutdown completed 
(kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper:66)
[2019-05-25 04:51:44,442] INFO [ExpirationReaper-0-Produce]: Shutting down 
(kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper:66)
[2019-05-25 04:51:44,535] INFO [ExpirationReaper-0-Produce]: Stopped 
(kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper:66)
[2019-05-25 04:51:44,535] INFO [ExpirationReaper-0-Produce]: Shutdown completed 
(kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper:66)
[2019-05-25 04:51:44,535] INFO [ExpirationReaper-0-DeleteRecords]: Shutting 
down (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper:66)
[2019-05-25 04:51:44,559] INFO [ExpirationReaper-0-DeleteRecords]: Stopped 
(kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper:66)
[2019-05-25 04:51:44,559] INFO [ExpirationReaper-0-DeleteRecords]: Shutdown 
completed (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper:66)
[2019-05-25 04:51:44,560] INFO [ExpirationReaper-0-ElectPreferredLeader]: 
Shutting down (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper:66)
[2019-05-25 04:51:44,739] INFO [ExpirationReaper-0-ElectPreferredLeader]: 
Stopped (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper:66)
[2019-05-25 04:51:44,739] INFO [ExpirationReaper-0-ElectPreferredLeader]: 
Shutdown completed 
(kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper:66)
[2019-05-25 04:51:44,746] INFO [ReplicaManager broker=0] Shut down completely 
(kafka.server.ReplicaManager:66)
[2019-05-25 04:51:44,747] INFO Shutting down. (kafka.log.LogManager:66)
[2019-05-25 04:51:44,747] INFO Shutting down the log cleaner. 
(kafka.log.LogCleaner:66)
[2019-05-25 04:51:44,748] INFO [kafka-log-cleaner-thread-0]: Shutting down 
(kafka.log.LogCleaner:66)
[2019-05-25 04:51:44,748] INFO [kafka-log-cleaner-thread-0]: Stopped 
(kafka.log.LogCleaner:66)
[2019-05-25 04:51:44,755] INFO [kafka-log-cleaner-thread-0]: Shutdown completed 
(kafka.log.LogCleaner:66)
[2019-05-25 04:51:44,762] INFO [ProducerStateManager 
partition=kgrouped-stream-test-7-KSTREAM-AGGREGATE-STATE-STORE-0000000003-changelog-0]
 Writing producer snapshot at offset 5 (kafka.log.ProducerStateManager:66)
[2019-05-25 04:51:44,766] INFO [ProducerStateManager partition=stream-one-9-0] 
Writing producer snapshot at offset 3 (kafka.log.ProducerStateManager:66)
[2019-05-25 04:51:44,768] INFO [ProducerStateManager partition=stream-one-1-2] 
Writing producer snapshot at offset 4 (kafka.log.ProducerStateManager:66)
[2019-05-25 04:51:44,773] INFO [ProducerStateManager 
partition=__consumer_offsets-8] Writing producer snapshot at offset 11 
(kafka.log.ProducerStateManager:66)
[2019-05-25 04:51:44,780] INFO [ProducerStateManager partition=output-5-0] 
Writing producer snapshot at offset 10 (kafka.log.ProducerStateManager:66)
[2019-05-25 04:51:44,787] INFO [ProducerStateManager 
partition=__consumer_offsets-21] Writing producer snapshot at offset 3 
(kafka.log.ProducerStateManager:66)
[2019-05-25 04:51:44,794] INFO [ProducerStateManager partition=stream-one-9-2] 
Writing producer snapshot at offset 6 (kafka.log.ProducerStateManager:66)
[2019-05-25 04:51:44,801] INFO [ProducerStateManager 
partition=__consumer_offsets-4] Writing producer snapshot at offset 13 
(kafka.log.ProducerStateManager:66)
[2019-05-25 04:51:44,808] INFO [ProducerStateManager 
partition=kgrouped-stream-test-8-KSTREAM-REDUCE-STATE-STORE-0000000002-changelog-0]
 Writing producer snapshot at offset 3 (kafka.log.ProducerStateManager:66)
[2019-05-25 04:51:44,821] INFO [ProducerStateManager 
partition=kgrouped-stream-test-3-aggregate-by-selected-key-changelog-0] Writing 
producer snapshot at offset 2 (kafka.log.ProducerStateManager:66)
[2019-05-25 04:51:44,830] INFO [ProducerStateManager partition=stream-one-2-1] 
Writing producer snapshot at offset 4 (kafka.log.ProducerStateManager:66)
[2019-05-25 04:51:44,840] INFO [ProducerStateManager 
partition=__consumer_offsets-7] Writing producer snapshot at offset 5 
(kafka.log.ProducerStateManager:66)
[2019-05-25 04:51:44,851] INFO [ProducerStateManager 
partition=kgrouped-stream-test-5-KSTREAM-AGGREGATE-STATE-STORE-0000000002-changelog-2]
 Writing producer snapshot at offset 4 (kafka.log.ProducerStateManager:66)
[2019-05-25 04:51:44,868] INFO [ProducerStateManager partition=output-9-0] 
Writing producer snapshot at offset 15 (kafka.log.ProducerStateManager:66)
[2019-05-25 04:51:44,875] INFO [ProducerStateManager partition=stream-one-1-0] 
Writing producer snapshot at offset 2 (kafka.log.ProducerStateManager:66)
[2019-05-25 04:51:44,889] INFO [ProducerStateManager 
partition=__consumer_offsets-9] Writing producer snapshot at offset 4 
(kafka.log.ProducerStateManager:66)
[2019-05-25 04:51:44,915] INFO [ProducerStateManager 
partition=kgrouped-stream-test-9-KSTREAM-AGGREGATE-STATE-STORE-0000000003-changelog-0]
 Writing producer snapshot at offset 13 (kafka.log.ProducerStateManager:66)
[2019-05-25 04:51:44,931] INFO [ProducerStateManager 
partition=kgrouped-stream-test-10-KSTREAM-AGGREGATE-STATE-STORE-0000000002-changelog-0]
 Writing producer snapshot at offset 3 (kafka.log.ProducerStateManager:66)
[2019-05-25 04:51:44,943] INFO [ProducerStateManager partition=stream-one-5-0] 
Writing producer snapshot at offset 2 (kafka.log.ProducerStateManager:66)
[2019-05-25 04:51:44,969] INFO [ProducerStateManager partition=stream-one-3-2] 
Writing producer snapshot at offset 4 (kafka.log.ProducerStateManager:66)
[2019-05-25 04:51:44,984] INFO [ProducerStateManager 
partition=kgrouped-stream-test-4-count-by-key-changelog-0] Writing producer 
snapshot at offset 2 (kafka.log.ProducerStateManager:66)
[2019-05-25 04:51:44,995] INFO [ProducerStateManager 
partition=kgrouped-stream-test-2-reduce-by-key-changelog-0] Writing producer 
snapshot at offset 2 (kafka.log.ProducerStateManager:66)
[2019-05-25 04:51:45,024] INFO [ProducerStateManager 
partition=__consumer_offsets-23] Writing producer snapshot at offset 15 
(kafka.log.ProducerStateManager:66)
[2019-05-25 04:51:45,036] INFO [ProducerStateManager partition=stream-one-9-1] 
Writing producer snapshot at offset 6 (kafka.log.ProducerStateManager:66)
[2019-05-25 04:51:45,056] INFO [ProducerStateManager 
partition=kgrouped-stream-test-3-aggregate-by-selected-key-changelog-1] Writing 
producer snapshot at offset 8 (kafka.log.ProducerStateManager:66)
[2019-05-25 04:51:45,083] INFO [ProducerStateManager partition=output-2-0] 
Writing producer snapshot at offset 10 (kafka.log.ProducerStateManager:66)
[2019-05-25 04:51:45,090] INFO [ProducerStateManager 
partition=kgrouped-stream-test-5-KSTREAM-AGGREGATE-STATE-STORE-0000000002-changelog-1]
 Writing producer snapshot at offset 4 (kafka.log.ProducerStateManager:66)
[2019-05-25 04:51:45,121] INFO [ProducerStateManager partition=stream-one-7-0] 
Writing producer snapshot at offset 3 (kafka.log.ProducerStateManager:66)
[2019-05-25 04:51:45,139] INFO [ProducerStateManager partition=stream-one-2-0] 
Writing producer snapshot at offset 2 (kafka.log.ProducerStateManager:66)
[2019-05-25 04:51:45,148] INFO [ProducerStateManager 
partition=__consumer_offsets-3] Writing producer snapshot at offset 13 
(kafka.log.ProducerStateManager:66)
[2019-05-25 04:51:45,161] INFO [ProducerStateManager 
partition=__consumer_offsets-18] Writing producer snapshot at offset 3 
(kafka.log.ProducerStateManager:66)
[2019-05-25 04:51:45,175] INFO [ProducerStateManager 
partition=user-sessions-6-0] Writing producer snapshot at offset 9 
(kafka.log.ProducerStateManager:66)
[2019-05-25 04:51:46,255] INFO [ProducerStateManager partition=stream-one-4-2] 
Writing producer snapshot at offset 4 (kafka.log.ProducerStateManager:66)
[2019-05-25 04:51:46,284] INFO [ProducerStateManager partition=stream-one-3-1] 
Writing producer snapshot at offset 4 (kafka.log.ProducerStateManager:66)
[2019-05-25 04:51:46,319] INFO [ProducerStateManager partition=stream-one-5-1] 
Writing producer snapshot at offset 4 (kafka.log.ProducerStateManager:66)
[2019-05-25 04:51:46,367] INFO [ProducerStateManager partition=stream-one-4-0] 
Writing producer snapshot at offset 2 (kafka.log.ProducerStateManager:66)
[2019-05-25 04:51:46,380] INFO [ProducerStateManager 
partition=kgrouped-stream-test-5-KSTREAM-AGGREGATE-STATE-STORE-0000000002-changelog-0]
 Writing producer snapshot at offset 2 (kafka.log.ProducerStateManager:66)
[2019-05-25 04:51:46,384] INFO [ProducerStateManager partition=output-1-0] 
Writing producer snapshot at offset 10 (kafka.log.ProducerStateManager:66)
[2019-05-25 04:51:46,407] INFO [ProducerStateManager 
partition=kgrouped-stream-test-6-KSTREAM-AGGREGATE-STATE-STORE-0000000002-changelog-0]
 Writing producer snapshot at offset 2 (kafka.log.ProducerStateManager:66)
[2019-05-25 04:51:46,419] INFO [ProducerStateManager partition=stream-one-7-2] 
Writing producer snapshot at offset 6 (kafka.log.ProducerStateManager:66)
[2019-05-25 04:51:46,426] INFO [ProducerStateManager 
partition=__consumer_offsets-17] Writing producer snapshot at offset 3 
(kafka.log.ProducerStateManager:66)
[2019-05-25 04:51:46,452] INFO [ProducerStateManager 
partition=kgrouped-stream-test-4-count-by-key-changelog-1] Writing producer 
snapshot at offset 8 (kafka.log.ProducerStateManager:66)
[2019-05-25 04:51:46,468] INFO [ProducerStateManager partition=stream-one-7-1] 
Writing producer snapshot at offset 6 (kafka.log.ProducerStateManager:66)
[2019-05-25 04:51:46,479] INFO [ProducerStateManager 
partition=__consumer_offsets-19] Writing producer snapshot at offset 3 
(kafka.log.ProducerStateManager:66)
[2019-05-25 04:51:46,487] INFO [ProducerStateManager 
partition=user-sessions-0-0] Writing producer snapshot at offset 10 
(kafka.log.ProducerStateManager:66)
[2019-05-25 04:51:46,496] INFO [ProducerStateManager partition=stream-one-5-2] 
Writing producer snapshot at offset 4 (kafka.log.ProducerStateManager:66)
[2019-05-25 04:51:46,527] INFO [ProducerStateManager partition=output-7-0] 
Writing producer snapshot at offset 15 (kafka.log.ProducerStateManager:66)
[2019-05-25 04:51:46,563] INFO [ProducerStateManager partition=stream-one-3-0] 
Writing producer snapshot at offset 2 (kafka.log.ProducerStateManager:66)
[2019-05-25 04:51:46,567] INFO [ProducerStateManager 
partition=__consumer_offsets-2] Writing producer snapshot at offset 12 
(kafka.log.ProducerStateManager:66)
[2019-05-25 04:51:46,580] INFO [ProducerStateManager 
partition=__consumer_offsets-6] Writing producer snapshot at offset 12 
(kafka.log.ProducerStateManager:66)
[2019-05-25 04:51:46,602] INFO [ProducerStateManager partition=stream-one-4-1] 
Writing producer snapshot at offset 4 (kafka.log.ProducerStateManager:66)
[2019-05-25 04:51:46,617] INFO [ProducerStateManager 
partition=user-sessions-8-0] Writing producer snapshot at offset 10 
(kafka.log.ProducerStateManager:66)
[2019-05-25 04:51:46,623] INFO [ProducerStateManager 
partition=kgrouped-stream-test-8-KSTREAM-REDUCE-STATE-STORE-0000000002-changelog-1]
 Writing producer snapshot at offset 12 (kafka.log.ProducerStateManager:66)
[2019-05-25 04:51:46,632] INFO [ProducerStateManager 
partition=kgrouped-stream-test-6-KSTREAM-AGGREGATE-STATE-STORE-0000000002-changelog-1]
 Writing producer snapshot at offset 8 (kafka.log.ProducerStateManager:66)
[2019-05-25 04:51:46,651] INFO [ProducerStateManager 
partition=__consumer_offsets-20] Writing producer snapshot at offset 3 
(kafka.log.ProducerStateManager:66)
[2019-05-25 04:51:46,667] INFO [ProducerStateManager 
partition=kgrouped-stream-test-2-reduce-by-key-changelog-1] Writing producer 
snapshot at offset 8 (kafka.log.ProducerStateManager:66)
[2019-05-25 04:51:46,708] INFO [ProducerStateManager 
partition=__consumer_offsets-12] Writing producer snapshot at offset 2 
(kafka.log.ProducerStateManager:66)
[2019-05-25 04:51:46,719] INFO [ProducerStateManager partition=output-4-0] 
Writing producer snapshot at offset 10 (kafka.log.ProducerStateManager:66)
[2019-05-25 04:51:46,757] INFO [ProducerStateManager partition=stream-one-1-1] 
Writing producer snapshot at offset 4 (kafka.log.ProducerStateManager:66)
[2019-05-25 04:51:46,765] INFO [ProducerStateManager 
partition=__consumer_offsets-5] Writing producer snapshot at offset 11 
(kafka.log.ProducerStateManager:66)
[2019-05-25 04:51:46,776] INFO [ProducerStateManager 
partition=__consumer_offsets-29] Writing producer snapshot at offset 2 
(kafka.log.ProducerStateManager:66)
[2019-05-25 04:51:46,797] INFO [ProducerStateManager 
partition=__consumer_offsets-34] Writing producer snapshot at offset 3 
(kafka.log.ProducerStateManager:66)
[2019-05-25 04:51:46,813] INFO [ProducerStateManager partition=stream-one-2-2] 
Writing producer snapshot at offset 4 (kafka.log.ProducerStateManager:66)
[2019-05-25 04:51:46,820] INFO [ProducerStateManager 
partition=kgrouped-stream-test-10-KSTREAM-AGGREGATE-STATE-STORE-0000000002-changelog-1]
 Writing producer snapshot at offset 12 (kafka.log.ProducerStateManager:66)
[2019-05-25 04:51:46,825] INFO [ProducerStateManager partition=output-3-0] 
Writing producer snapshot at offset 10 (kafka.log.ProducerStateManager:66)
[2019-05-25 04:51:46,880] INFO Shutdown complete. (kafka.log.LogManager:66)
[2019-05-25 04:51:46,881] INFO [ControllerEventThread controllerId=0] Shutting 
down (kafka.controller.ControllerEventManager$ControllerEventThread:66)
[2019-05-25 04:51:46,883] INFO [ControllerEventThread controllerId=0] Stopped 
(kafka.controller.ControllerEventManager$ControllerEventThread:66)
[2019-05-25 04:51:46,883] INFO [ControllerEventThread controllerId=0] Shutdown 
completed (kafka.controller.ControllerEventManager$ControllerEventThread:66)
[2019-05-25 04:51:46,887] INFO [PartitionStateMachine controllerId=0] Stopped 
partition state machine (kafka.controller.ZkPartitionStateMachine:66)
[2019-05-25 04:51:46,888] INFO [ReplicaStateMachine controllerId=0] Stopped 
replica state machine (kafka.controller.ZkReplicaStateMachine:66)
[2019-05-25 04:51:46,889] INFO [RequestSendThread controllerId=0] Shutting down 
(kafka.controller.RequestSendThread:66)
[2019-05-25 04:51:46,889] INFO [RequestSendThread controllerId=0] Stopped 
(kafka.controller.RequestSendThread:66)
[2019-05-25 04:51:46,895] INFO [RequestSendThread controllerId=0] Shutdown 
completed (kafka.controller.RequestSendThread:66)
[2019-05-25 04:51:46,896] INFO [Controller id=0] Resigned 
(kafka.controller.KafkaController:66)
[2019-05-25 04:51:46,897] INFO [ZooKeeperClient Kafka server] Closing. 
(kafka.zookeeper.ZooKeeperClient:66)
[2019-05-25 04:51:46,899] INFO Processed session termination for sessionid: 
0x100aaf545600000 (org.apache.zookeeper.server.PrepRequestProcessor:487)
[2019-05-25 04:51:46,900] INFO Closed socket connection for client 
/127.0.0.1:56622 which had sessionid 0x100aaf545600000 
(org.apache.zookeeper.server.NIOServerCnxn:1056)
[2019-05-25 04:51:46,903] INFO EventThread shut down for session: 
0x100aaf545600000 (org.apache.zookeeper.ClientCnxn:522)
[2019-05-25 04:51:46,903] INFO Session: 0x100aaf545600000 closed 
(org.apache.zookeeper.ZooKeeper:693)
[2019-05-25 04:51:46,904] INFO [ZooKeeperClient Kafka server] Closed. 
(kafka.zookeeper.ZooKeeperClient:66)
[2019-05-25 04:51:46,904] INFO [ThrottledChannelReaper-Fetch]: Shutting down 
(kafka.server.ClientQuotaManager$ThrottledChannelReaper:66)
[2019-05-25 04:51:47,651] INFO [ThrottledChannelReaper-Fetch]: Stopped 
(kafka.server.ClientQuotaManager$ThrottledChannelReaper:66)
[2019-05-25 04:51:47,654] INFO [ThrottledChannelReaper-Fetch]: Shutdown 
completed (kafka.server.ClientQuotaManager$ThrottledChannelReaper:66)
[2019-05-25 04:51:47,654] INFO [ThrottledChannelReaper-Produce]: Shutting down 
(kafka.server.ClientQuotaManager$ThrottledChannelReaper:66)
[2019-05-25 04:51:48,647] INFO [ThrottledChannelReaper-Produce]: Stopped 
(kafka.server.ClientQuotaManager$ThrottledChannelReaper:66)
[2019-05-25 04:51:48,650] INFO [ThrottledChannelReaper-Produce]: Shutdown 
completed (kafka.server.ClientQuotaManager$ThrottledChannelReaper:66)
[2019-05-25 04:51:48,650] INFO [ThrottledChannelReaper-Request]: Shutting down 
(kafka.server.ClientQuotaManager$ThrottledChannelReaper:66)
[2019-05-25 04:51:48,651] INFO [ThrottledChannelReaper-Request]: Stopped 
(kafka.server.ClientQuotaManager$ThrottledChannelReaper:66)
[2019-05-25 04:51:48,652] INFO [ThrottledChannelReaper-Request]: Shutdown 
completed (kafka.server.ClientQuotaManager$ThrottledChannelReaper:66)
[2019-05-25 04:51:48,652] INFO [SocketServer brokerId=0] Shutting down socket 
server (kafka.network.SocketServer:66)
[2019-05-25 04:51:48,676] INFO [SocketServer brokerId=0] Shutdown completed 
(kafka.network.SocketServer:66)
[2019-05-25 04:51:48,694] INFO [KafkaServer id=0] shut down completed 
(kafka.server.KafkaServer:66)
[2019-05-25 04:51:48,796] INFO shutting down 
(org.apache.zookeeper.server.ZooKeeperServer:502)
[2019-05-25 04:51:48,796] INFO Shutting down 
(org.apache.zookeeper.server.SessionTrackerImpl:226)
[2019-05-25 04:51:48,797] INFO Shutting down 
(org.apache.zookeeper.server.PrepRequestProcessor:769)
[2019-05-25 04:51:48,797] INFO Shutting down 
(org.apache.zookeeper.server.SyncRequestProcessor:208)
[2019-05-25 04:51:48,797] INFO PrepRequestProcessor exited loop! 
(org.apache.zookeeper.server.PrepRequestProcessor:144)
[2019-05-25 04:51:48,797] INFO SyncRequestProcessor exited! 
(org.apache.zookeeper.server.SyncRequestProcessor:186)
[2019-05-25 04:51:48,798] INFO shutdown of request processor complete 
(org.apache.zookeeper.server.FinalRequestProcessor:430)
[2019-05-25 04:51:48,801] INFO NIOServerCnxn factory exited run method 
(org.apache.zookeeper.server.NIOServerCnxnFactory:249)
{noformat}
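For reference, the read path of the verification consumer that appears near the end of this log (group {{console-consumer-38403}}: subscribe to {{output-9}} with {{auto.offset.reset = earliest}} and byte-array deserializers, find no committed offset for {{output-9-0}}, reset to offset 0) can be replayed with a plain consumer. The sketch below is illustrative only: the class name, group id, and poll timeout are assumptions of this sketch, while the bootstrap address {{localhost:37841}} and topic {{output-9}} are taken verbatim from this run and will differ on any other machine.

{code:java}
import java.nio.charset.StandardCharsets;
import java.time.Duration;
import java.util.Collections;
import java.util.Properties;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.ByteArrayDeserializer;

public class OutputTopicReader {

    public static void main(final String[] args) {
        final Properties props = new Properties();
        // Broker address and topic are copied from the log above; both are
        // specific to this embedded-cluster run and will differ locally.
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:37841");
        // Hypothetical group id for this manual check; any unused id works.
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "log-replay-check");
        // Same reset policy as the verification consumer in the log: with no
        // committed offset for the partition, start from the earliest offset.
        props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());

        try (KafkaConsumer<byte[], byte[]> consumer = new KafkaConsumer<>(props)) {
            consumer.subscribe(Collections.singletonList("output-9"));
            // One bounded poll only; a robust test helper would keep polling
            // until the expected number of records arrives or a timeout hits.
            final ConsumerRecords<byte[], byte[]> records = consumer.poll(Duration.ofSeconds(10));
            for (final ConsumerRecord<byte[], byte[]> record : records) {
                System.out.printf("partition=%d offset=%d key=%s%n",
                        record.partition(), record.offset(),
                        record.key() == null ? "null" : new String(record.key(), StandardCharsets.UTF_8));
            }
        }
    }
}
{code}

Note the single bounded poll: if the streams application has not yet produced its results to {{output-9}} when the poll returns, the consumer sees nothing, which matches the timing sensitivity this ticket reports.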



