Jenkins build is back to normal : Kafka » kafka-trunk-jdk8 #276

2020-12-09 Thread Apache Jenkins Server
See 




Build failed in Jenkins: Kafka » kafka-trunk-jdk15 #322

2020-12-09 Thread Apache Jenkins Server
See 


Changes:

[github] MINOR: remove duplicate code from resetByDuration (#9699)

[github] MINOR: Fix some java docs of ReplicaStateMachine (#8552)


--
[...truncated 3.49 MB...]

org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.SessionStoreBuilder@62a7729d, 
timestamped = false, caching = false, logging = false] PASSED

org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.SessionStoreBuilder@1bd011d6, 
timestamped = false, caching = false, logging = false] STARTED

org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.SessionStoreBuilder@1bd011d6, 
timestamped = false, caching = false, logging = false] PASSED

org.apache.kafka.streams.test.ConsumerRecordFactoryTest > 
shouldCreateConsumerRecordsFromKeyValuePairs STARTED

org.apache.kafka.streams.test.ConsumerRecordFactoryTest > 
shouldCreateConsumerRecordsFromKeyValuePairs PASSED

org.apache.kafka.streams.test.ConsumerRecordFactoryTest > 
shouldNotAllowToCreateTopicWithNullTopicNameWithNullKeyAndDefaultTimestamp 
STARTED

org.apache.kafka.streams.test.ConsumerRecordFactoryTest > 
shouldNotAllowToCreateTopicWithNullTopicNameWithNullKeyAndDefaultTimestamp 
PASSED

org.apache.kafka.streams.test.ConsumerRecordFactoryTest > 
shouldRequireCustomTopicNameIfNotDefaultFactoryTopicNameWithDefaultTimestamp 
STARTED

org.apache.kafka.streams.test.ConsumerRecordFactoryTest > 
shouldRequireCustomTopicNameIfNotDefaultFactoryTopicNameWithDefaultTimestamp 
PASSED

org.apache.kafka.streams.test.ConsumerRecordFactoryTest > 
shouldNotAllowToCreateTopicWithNullTopicNameWithNullKey STARTED

org.apache.kafka.streams.test.ConsumerRecordFactoryTest > 
shouldNotAllowToCreateTopicWithNullTopicNameWithNullKey PASSED

org.apache.kafka.streams.test.ConsumerRecordFactoryTest > 
shouldCreateNullKeyConsumerRecordWithOtherTopicNameAndTimestampWithTimetamp 
STARTED

org.apache.kafka.streams.test.ConsumerRecordFactoryTest > 
shouldCreateNullKeyConsumerRecordWithOtherTopicNameAndTimestampWithTimetamp 
PASSED

org.apache.kafka.streams.test.ConsumerRecordFactoryTest > 
shouldCreateConsumerRecordWithTimestamp STARTED

org.apache.kafka.streams.test.ConsumerRecordFactoryTest > 
shouldCreateConsumerRecordWithTimestamp PASSED

org.apache.kafka.streams.test.ConsumerRecordFactoryTest > 
shouldNotAllowToCreateTopicWithNullHeaders STARTED

org.apache.kafka.streams.test.ConsumerRecordFactoryTest > 
shouldNotAllowToCreateTopicWithNullHeaders PASSED

org.apache.kafka.streams.test.ConsumerRecordFactoryTest > 
shouldNotAllowToCreateTopicWithNullTopicNameWithDefaultTimestamp STARTED

org.apache.kafka.streams.test.ConsumerRecordFactoryTest > 
shouldNotAllowToCreateTopicWithNullTopicNameWithDefaultTimestamp PASSED

org.apache.kafka.streams.test.ConsumerRecordFactoryTest > 
shouldRequireCustomTopicNameIfNotDefaultFactoryTopicName STARTED

org.apache.kafka.streams.test.ConsumerRecordFactoryTest > 
shouldRequireCustomTopicNameIfNotDefaultFactoryTopicName PASSED

org.apache.kafka.streams.test.ConsumerRecordFactoryTest > 
shouldRequireCustomTopicNameIfNotDefaultFactoryTopicNameWithNullKey STARTED

org.apache.kafka.streams.test.ConsumerRecordFactoryTest > 
shouldRequireCustomTopicNameIfNotDefaultFactoryTopicNameWithNullKey PASSED

org.apache.kafka.streams.test.ConsumerRecordFactoryTest > 
shouldCreateConsumerRecord STARTED

org.apache.kafka.streams.test.ConsumerRecordFactoryTest > 
shouldCreateConsumerRecord PASSED

org.apache.kafka.streams.test.ConsumerRecordFactoryTest > 
shouldCreateNullKeyConsumerRecord STARTED

org.apache.kafka.streams.test.ConsumerRecordFactoryTest > 
shouldCreateNullKeyConsumerRecord PASSED

org.apache.kafka.streams.test.ConsumerRecordFactoryTest > 
shouldCreateConsumerRecordWithOtherTopicName STARTED

org.apache.kafka.streams.test.ConsumerRecordFactoryTest > 
shouldCreateConsumerRecordWithOtherTopicName PASSED

org.apache.kafka.streams.test.ConsumerRecordFactoryTest > shouldAdvanceTime 
STARTED

org.apache.kafka.streams.test.ConsumerRecordFactoryTest > shouldAdvanceTime 
PASSED

org.apache.kafka.streams.test.ConsumerRecordFactoryTest > 
shouldNotAllowToCreateTopicWithNullTopicNameWithKeyValuePairs STARTED

org.apache.kafka.streams.test.ConsumerRecordFactoryTest > 
shouldNotAllowToCreateTopicWithNullTopicNameWithKeyValuePairs PASSED

org.apache.kafka.streams.test.ConsumerRecordFactoryTest > 
shouldRequireCustomTopicNameIfNotDefaultFactoryTopicNameWithKeyValuePairsAndCustomTimestamps
 STARTED

org.apache.kafka.streams.test.ConsumerRecordFactoryTest > 
shouldRequireCustomTopicNameIfNotDefaultFactoryTopicNameWithKeyValuePairsAndCustomTimestamps
 PASSED


Build failed in Jenkins: Kafka » kafka-trunk-jdk11 #301

2020-12-09 Thread Apache Jenkins Server
See 


Changes:

[github] MINOR: remove duplicate code from resetByDuration (#9699)

[github] MINOR: Fix some java docs of ReplicaStateMachine (#8552)


--
[...truncated 3.48 MB...]

org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.TimestampedWindowStoreBuilder@4215fdab,
 timestamped = true, caching = true, logging = true] STARTED

org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.TimestampedWindowStoreBuilder@4215fdab,
 timestamped = true, caching = true, logging = true] PASSED

org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.TimestampedWindowStoreBuilder@2a72cadb,
 timestamped = true, caching = true, logging = false] STARTED

org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.TimestampedWindowStoreBuilder@2a72cadb,
 timestamped = true, caching = true, logging = false] PASSED

org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.TimestampedWindowStoreBuilder@79df5bcd,
 timestamped = true, caching = true, logging = false] STARTED

org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.TimestampedWindowStoreBuilder@79df5bcd,
 timestamped = true, caching = true, logging = false] PASSED

org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.TimestampedWindowStoreBuilder@4e46af2d,
 timestamped = true, caching = true, logging = false] STARTED

org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.TimestampedWindowStoreBuilder@4e46af2d,
 timestamped = true, caching = true, logging = false] PASSED

org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.TimestampedWindowStoreBuilder@23d3ebeb,
 timestamped = true, caching = false, logging = true] STARTED

org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.TimestampedWindowStoreBuilder@23d3ebeb,
 timestamped = true, caching = false, logging = true] PASSED

org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.TimestampedWindowStoreBuilder@1fcbc958,
 timestamped = true, caching = false, logging = true] STARTED

org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.TimestampedWindowStoreBuilder@1fcbc958,
 timestamped = true, caching = false, logging = true] PASSED

org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.TimestampedWindowStoreBuilder@1cc9e85f,
 timestamped = true, caching = false, logging = true] STARTED

org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.TimestampedWindowStoreBuilder@1cc9e85f,
 timestamped = true, caching = false, logging = true] PASSED

org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.TimestampedWindowStoreBuilder@670d032c,
 timestamped = true, caching = false, logging = false] STARTED

org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.TimestampedWindowStoreBuilder@670d032c,
 timestamped = true, caching = false, logging = false] PASSED

org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.TimestampedWindowStoreBuilder@5bbd3095,
 timestamped = true, caching = false, logging = false] STARTED

org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.TimestampedWindowStoreBuilder@5bbd3095,
 timestamped = true, caching = false, logging = false] PASSED

org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.TimestampedWindowStoreBuilder@1afdb3b3,
 timestamped = true, caching = false, logging = false] 

Build failed in Jenkins: Kafka » kafka-trunk-jdk8 #275

2020-12-09 Thread Apache Jenkins Server
See 


Changes:

[github] KAFKA-10826; Ensure raft io thread respects linger timeout (#9716)

[github] KAFKA-10289; Fix failed connect_distributed_test.py 
(ConnectDistributedTest.test_bounce) (#9673)

[github] KAFKA-9892; Producer state snapshot should be forced to disk (#9621)


--
[...truncated 3.46 MB...]
org.apache.kafka.streams.TopologyTestDriverTest > 
shouldPunctuateIfEvenTimeAdvances[Eos enabled = false] STARTED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldPunctuateIfEvenTimeAdvances[Eos enabled = false] PASSED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldThrowNoSuchElementExceptionForUnusedOutputTopicWithDynamicRouting[Eos 
enabled = false] STARTED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldThrowNoSuchElementExceptionForUnusedOutputTopicWithDynamicRouting[Eos 
enabled = false] PASSED

org.apache.kafka.streams.TopologyTestDriverTest > shouldInitProcessor[Eos 
enabled = false] STARTED

org.apache.kafka.streams.TopologyTestDriverTest > shouldInitProcessor[Eos 
enabled = false] PASSED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldThrowForUnknownTopic[Eos enabled = false] STARTED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldThrowForUnknownTopic[Eos enabled = false] PASSED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldPunctuateOnStreamsTime[Eos enabled = false] STARTED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldPunctuateOnStreamsTime[Eos enabled = false] PASSED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldCaptureGlobalTopicNameIfWrittenInto[Eos enabled = false] STARTED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldCaptureGlobalTopicNameIfWrittenInto[Eos enabled = false] PASSED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldThrowIfInMemoryBuiltInStoreIsAccessedWithUntypedMethod[Eos enabled = 
false] STARTED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldThrowIfInMemoryBuiltInStoreIsAccessedWithUntypedMethod[Eos enabled = 
false] PASSED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldProcessFromSourcesThatMatchMultiplePattern[Eos enabled = false] STARTED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldProcessFromSourcesThatMatchMultiplePattern[Eos enabled = false] PASSED

org.apache.kafka.streams.TopologyTestDriverTest > shouldPopulateGlobalStore[Eos 
enabled = false] STARTED

org.apache.kafka.streams.TopologyTestDriverTest > shouldPopulateGlobalStore[Eos 
enabled = false] PASSED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldThrowIfPersistentBuiltInStoreIsAccessedWithUntypedMethod[Eos enabled = 
false] STARTED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldThrowIfPersistentBuiltInStoreIsAccessedWithUntypedMethod[Eos enabled = 
false] PASSED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldAllowPrePopulatingStatesStoresWithCachingEnabled[Eos enabled = false] 
STARTED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldAllowPrePopulatingStatesStoresWithCachingEnabled[Eos enabled = false] 
PASSED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldReturnCorrectPersistentStoreTypeOnly[Eos enabled = false] STARTED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldReturnCorrectPersistentStoreTypeOnly[Eos enabled = false] PASSED

org.apache.kafka.streams.TopologyTestDriverTest > shouldRespectTaskIdling[Eos 
enabled = false] STARTED

org.apache.kafka.streams.TopologyTestDriverTest > shouldRespectTaskIdling[Eos 
enabled = false] PASSED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldUseSourceSpecificDeserializers[Eos enabled = false] STARTED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldUseSourceSpecificDeserializers[Eos enabled = false] PASSED

org.apache.kafka.streams.TopologyTestDriverTest > shouldReturnAllStores[Eos 
enabled = false] STARTED

org.apache.kafka.streams.TopologyTestDriverTest > shouldReturnAllStores[Eos 
enabled = false] PASSED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldNotCreateStateDirectoryForStatelessTopology[Eos enabled = false] STARTED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldNotCreateStateDirectoryForStatelessTopology[Eos enabled = false] PASSED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldApplyGlobalUpdatesCorrectlyInRecursiveTopologies[Eos enabled = false] 
STARTED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldApplyGlobalUpdatesCorrectlyInRecursiveTopologies[Eos enabled = false] 
PASSED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldReturnAllStoresNames[Eos enabled = false] STARTED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldReturnAllStoresNames[Eos enabled = false] PASSED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldPassRecordHeadersIntoSerializersAndDeserializers[Eos enabled = false] 
STARTED


Build failed in Jenkins: Kafka » kafka-trunk-jdk11 #300

2020-12-09 Thread Apache Jenkins Server
See 


Changes:

[github] KAFKA-10826; Ensure raft io thread respects linger timeout (#9716)

[github] KAFKA-10289; Fix failed connect_distributed_test.py 
(ConnectDistributedTest.test_bounce) (#9673)

[github] KAFKA-9892; Producer state snapshot should be forced to disk (#9621)


--
[...truncated 3.49 MB...]
org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.WindowStoreBuilder@53203134, 
timestamped = false, caching = true, logging = false] STARTED

org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.WindowStoreBuilder@53203134, 
timestamped = false, caching = true, logging = false] PASSED

org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.WindowStoreBuilder@61bb3f8e, 
timestamped = false, caching = true, logging = false] STARTED

org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.WindowStoreBuilder@61bb3f8e, 
timestamped = false, caching = true, logging = false] PASSED

org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.WindowStoreBuilder@76c3e2de, 
timestamped = false, caching = false, logging = true] STARTED

org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.WindowStoreBuilder@76c3e2de, 
timestamped = false, caching = false, logging = true] PASSED

org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.WindowStoreBuilder@2e42c2a6, 
timestamped = false, caching = false, logging = true] STARTED

org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.WindowStoreBuilder@2e42c2a6, 
timestamped = false, caching = false, logging = true] PASSED

org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.WindowStoreBuilder@7c39efcb, 
timestamped = false, caching = false, logging = true] STARTED

org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.WindowStoreBuilder@7c39efcb, 
timestamped = false, caching = false, logging = true] PASSED

org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.WindowStoreBuilder@4446e88d, 
timestamped = false, caching = false, logging = false] STARTED

org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.WindowStoreBuilder@4446e88d, 
timestamped = false, caching = false, logging = false] PASSED

org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.WindowStoreBuilder@2d7a27c9, 
timestamped = false, caching = false, logging = false] STARTED

org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.WindowStoreBuilder@2d7a27c9, 
timestamped = false, caching = false, logging = false] PASSED

org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.WindowStoreBuilder@15959732, 
timestamped = false, caching = false, logging = false] STARTED

org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.WindowStoreBuilder@15959732, 
timestamped = false, caching = false, logging = false] PASSED

org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.SessionStoreBuilder@266165fc, 
timestamped = false, caching = true, logging = true] STARTED

org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.SessionStoreBuilder@266165fc, 
timestamped = false, caching = true, logging = true] PASSED

org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.SessionStoreBuilder@5510f33d, 
timestamped = false, caching = true, logging = true] STARTED


[GitHub] [kafka-site] mjsax opened a new pull request #313: MINOR: remove quickstart-docker.html

2020-12-09 Thread GitBox


mjsax opened a new pull request #313:
URL: https://github.com/apache/kafka-site/pull/313


   This page was added during the web-page redesign but was never finished
   and is dangling.
   
   Call for review @scott-confluent @guozhangwang @miguno 
   
   \cc @mimaison @bbejeck (to make sure we don't re-publish it during the 
release)



This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org




Build failed in Jenkins: Kafka » kafka-trunk-jdk8 #274

2020-12-09 Thread Apache Jenkins Server
See 


Changes:

[github] KAFKA-10606: Disable auto topic creation for fetch-all-topic-metadata 
request (#9435)

[github] MINOR: Remove connection id from Send and consolidate request/message 
utils (#9714)

[github] MINOR: add "flush=True" to all print in system tests (#9711)


--
[...truncated 6.92 MB...]

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldThrowForUnknownTopicDeprecated[Eos enabled = false] STARTED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldThrowForUnknownTopicDeprecated[Eos enabled = false] PASSED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldThrowPatternNotValidForTopicNameException[Eos enabled = false] STARTED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldThrowPatternNotValidForTopicNameException[Eos enabled = false] PASSED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldEnqueueLaterOutputsAfterEarlierOnes[Eos enabled = false] STARTED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldEnqueueLaterOutputsAfterEarlierOnes[Eos enabled = false] PASSED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldUseSourceSpecificDeserializersDeprecated[Eos enabled = false] STARTED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldUseSourceSpecificDeserializersDeprecated[Eos enabled = false] PASSED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldPunctuateIfEvenTimeAdvances[Eos enabled = false] STARTED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldPunctuateIfEvenTimeAdvances[Eos enabled = false] PASSED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldThrowNoSuchElementExceptionForUnusedOutputTopicWithDynamicRouting[Eos 
enabled = false] STARTED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldThrowNoSuchElementExceptionForUnusedOutputTopicWithDynamicRouting[Eos 
enabled = false] PASSED

org.apache.kafka.streams.TopologyTestDriverTest > shouldInitProcessor[Eos 
enabled = false] STARTED

org.apache.kafka.streams.TopologyTestDriverTest > shouldInitProcessor[Eos 
enabled = false] PASSED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldThrowForUnknownTopic[Eos enabled = false] STARTED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldThrowForUnknownTopic[Eos enabled = false] PASSED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldPunctuateOnStreamsTime[Eos enabled = false] STARTED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldPunctuateOnStreamsTime[Eos enabled = false] PASSED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldCaptureGlobalTopicNameIfWrittenInto[Eos enabled = false] STARTED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldCaptureGlobalTopicNameIfWrittenInto[Eos enabled = false] PASSED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldThrowIfInMemoryBuiltInStoreIsAccessedWithUntypedMethod[Eos enabled = 
false] STARTED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldThrowIfInMemoryBuiltInStoreIsAccessedWithUntypedMethod[Eos enabled = 
false] PASSED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldProcessFromSourcesThatMatchMultiplePattern[Eos enabled = false] STARTED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldProcessFromSourcesThatMatchMultiplePattern[Eos enabled = false] PASSED

org.apache.kafka.streams.TopologyTestDriverTest > shouldPopulateGlobalStore[Eos 
enabled = false] STARTED

org.apache.kafka.streams.TopologyTestDriverTest > shouldPopulateGlobalStore[Eos 
enabled = false] PASSED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldThrowIfPersistentBuiltInStoreIsAccessedWithUntypedMethod[Eos enabled = 
false] STARTED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldThrowIfPersistentBuiltInStoreIsAccessedWithUntypedMethod[Eos enabled = 
false] PASSED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldAllowPrePopulatingStatesStoresWithCachingEnabled[Eos enabled = false] 
STARTED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldAllowPrePopulatingStatesStoresWithCachingEnabled[Eos enabled = false] 
PASSED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldReturnCorrectPersistentStoreTypeOnly[Eos enabled = false] STARTED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldReturnCorrectPersistentStoreTypeOnly[Eos enabled = false] PASSED

org.apache.kafka.streams.TopologyTestDriverTest > shouldRespectTaskIdling[Eos 
enabled = false] STARTED

org.apache.kafka.streams.TopologyTestDriverTest > shouldRespectTaskIdling[Eos 
enabled = false] PASSED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldUseSourceSpecificDeserializers[Eos enabled = false] STARTED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldUseSourceSpecificDeserializers[Eos enabled = false] PASSED

org.apache.kafka.streams.TopologyTestDriverTest > shouldReturnAllStores[Eos 
enabled = false] STARTED


Build failed in Jenkins: Kafka » kafka-trunk-jdk11 #299

2020-12-09 Thread Apache Jenkins Server
See 


Changes:

[Ismael Juma] Revert "KAFKA-10713: Stricter protocol parsing in hostnames 
(#9593)"

[github] KAFKA-10606: Disable auto topic creation for fetch-all-topic-metadata 
request (#9435)

[github] MINOR: Remove connection id from Send and consolidate request/message 
utils (#9714)

[github] MINOR: add "flush=True" to all print in system tests (#9711)


--
[...truncated 3.50 MB...]
org.apache.kafka.streams.internals.WindowStoreFacadeTest > shouldForwardInit 
STARTED

org.apache.kafka.streams.internals.WindowStoreFacadeTest > shouldForwardInit 
PASSED

org.apache.kafka.streams.TestTopicsTest > testNonUsedOutputTopic STARTED

org.apache.kafka.streams.TestTopicsTest > testNonUsedOutputTopic PASSED

org.apache.kafka.streams.TestTopicsTest > testEmptyTopic STARTED

org.apache.kafka.streams.TestTopicsTest > testEmptyTopic PASSED

org.apache.kafka.streams.TestTopicsTest > testStartTimestamp STARTED

org.apache.kafka.streams.TestTopicsTest > testStartTimestamp PASSED

org.apache.kafka.streams.TestTopicsTest > testNegativeAdvance STARTED

org.apache.kafka.streams.TestTopicsTest > testNegativeAdvance PASSED

org.apache.kafka.streams.TestTopicsTest > shouldNotAllowToCreateWithNullDriver 
STARTED

org.apache.kafka.streams.TestTopicsTest > shouldNotAllowToCreateWithNullDriver 
PASSED

org.apache.kafka.streams.TestTopicsTest > testDuration STARTED

org.apache.kafka.streams.TestTopicsTest > testDuration PASSED

org.apache.kafka.streams.TestTopicsTest > testOutputToString STARTED

org.apache.kafka.streams.TestTopicsTest > testOutputToString PASSED

org.apache.kafka.streams.TestTopicsTest > testValue STARTED

org.apache.kafka.streams.TestTopicsTest > testValue PASSED

org.apache.kafka.streams.TestTopicsTest > testTimestampAutoAdvance STARTED

org.apache.kafka.streams.TestTopicsTest > testTimestampAutoAdvance PASSED

org.apache.kafka.streams.TestTopicsTest > testOutputWrongSerde STARTED

org.apache.kafka.streams.TestTopicsTest > testOutputWrongSerde PASSED

org.apache.kafka.streams.TestTopicsTest > 
shouldNotAllowToCreateOutputTopicWithNullTopicName STARTED

org.apache.kafka.streams.TestTopicsTest > 
shouldNotAllowToCreateOutputTopicWithNullTopicName PASSED

org.apache.kafka.streams.TestTopicsTest > testWrongSerde STARTED

org.apache.kafka.streams.TestTopicsTest > testWrongSerde PASSED

org.apache.kafka.streams.TestTopicsTest > testKeyValuesToMapWithNull STARTED

org.apache.kafka.streams.TestTopicsTest > testKeyValuesToMapWithNull PASSED

org.apache.kafka.streams.TestTopicsTest > testNonExistingOutputTopic STARTED

org.apache.kafka.streams.TestTopicsTest > testNonExistingOutputTopic PASSED

org.apache.kafka.streams.TestTopicsTest > testMultipleTopics STARTED

org.apache.kafka.streams.TestTopicsTest > testMultipleTopics PASSED

org.apache.kafka.streams.TestTopicsTest > testKeyValueList STARTED

org.apache.kafka.streams.TestTopicsTest > testKeyValueList PASSED

org.apache.kafka.streams.TestTopicsTest > 
shouldNotAllowToCreateOutputWithNullDriver STARTED

org.apache.kafka.streams.TestTopicsTest > 
shouldNotAllowToCreateOutputWithNullDriver PASSED

org.apache.kafka.streams.TestTopicsTest > testValueList STARTED

org.apache.kafka.streams.TestTopicsTest > testValueList PASSED

org.apache.kafka.streams.TestTopicsTest > testRecordList STARTED

org.apache.kafka.streams.TestTopicsTest > testRecordList PASSED

org.apache.kafka.streams.TestTopicsTest > testNonExistingInputTopic STARTED

org.apache.kafka.streams.TestTopicsTest > testNonExistingInputTopic PASSED

org.apache.kafka.streams.TestTopicsTest > testKeyValuesToMap STARTED

org.apache.kafka.streams.TestTopicsTest > testKeyValuesToMap PASSED

org.apache.kafka.streams.TestTopicsTest > testRecordsToList STARTED

org.apache.kafka.streams.TestTopicsTest > testRecordsToList PASSED

org.apache.kafka.streams.TestTopicsTest > testKeyValueListDuration STARTED

org.apache.kafka.streams.TestTopicsTest > testKeyValueListDuration PASSED

org.apache.kafka.streams.TestTopicsTest > testInputToString STARTED

org.apache.kafka.streams.TestTopicsTest > testInputToString PASSED

org.apache.kafka.streams.TestTopicsTest > testTimestamp STARTED

org.apache.kafka.streams.TestTopicsTest > testTimestamp PASSED

org.apache.kafka.streams.TestTopicsTest > testWithHeaders STARTED

org.apache.kafka.streams.TestTopicsTest > testWithHeaders PASSED

org.apache.kafka.streams.TestTopicsTest > testKeyValue STARTED

org.apache.kafka.streams.TestTopicsTest > testKeyValue PASSED

org.apache.kafka.streams.TestTopicsTest > 
shouldNotAllowToCreateTopicWithNullTopicName STARTED

org.apache.kafka.streams.TestTopicsTest > 
shouldNotAllowToCreateTopicWithNullTopicName PASSED

> Task :streams:upgrade-system-tests-0101:spotbugsMain NO-SOURCE
> Task :streams:upgrade-system-tests-0101:test
> Task :streams:upgrade-system-tests-0102:compileJava NO-SOURCE
> Task 

[jira] [Resolved] (KAFKA-10289) fix failed connect_distributed_test.py (ConnectDistributedTest.test_bounce)

2020-12-09 Thread Jason Gustafson (Jira)


 [ 
https://issues.apache.org/jira/browse/KAFKA-10289?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel
 ]

Jason Gustafson resolved KAFKA-10289.
-
Resolution: Fixed

> fix failed connect_distributed_test.py (ConnectDistributedTest.test_bounce)
> ---
>
> Key: KAFKA-10289
> URL: https://issues.apache.org/jira/browse/KAFKA-10289
> Project: Kafka
>  Issue Type: Sub-task
>  Components: KafkaConnect, system tests
>Reporter: Chia-Ping Tsai
>Assignee: Chia-Ping Tsai
>Priority: Major
>
> {quote}
> Module: kafkatest.tests.connect.connect_distributed_test
> Class:  ConnectDistributedTest
> Method: test_broker_compatibility
> Arguments:
> {
>   "auto_create_topics": false,
>   "broker_version": "0.10.1.1",
>   "connect_protocol": "compatible",
>   "security_protocol": "PLAINTEXT"
> }
> {quote}
> {quote}
> Module: kafkatest.tests.connect.connect_distributed_test
> Class:  ConnectDistributedTest
> Method: test_broker_compatibility
> Arguments:
> {
>   "auto_create_topics": false,
>   "broker_version": "2.1.1",
>   "connect_protocol": "compatible",
>   "security_protocol": "PLAINTEXT"
> }
> {quote}



--
This message was sent by Atlassian Jira
(v8.3.4#803005)


[jira] [Resolved] (KAFKA-10826) Ensure raft io thread wakes up after linger expiration

2020-12-09 Thread Jason Gustafson (Jira)


 [ 
https://issues.apache.org/jira/browse/KAFKA-10826?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel
 ]

Jason Gustafson resolved KAFKA-10826.
-
Resolution: Fixed

> Ensure raft io thread wakes up after linger expiration
> --
>
> Key: KAFKA-10826
> URL: https://issues.apache.org/jira/browse/KAFKA-10826
> Project: Kafka
>  Issue Type: Sub-task
>Reporter: Jason Gustafson
>Assignee: Jason Gustafson
>Priority: Major
>
> When scheduling an append, we currently only wake up the IO thread after the 
> batch is ready to drain. If the IO thread is blocking in `poll()`, there is 
> no guarantee that it will get woken up by a subsequent append. We need to 
> ensure that the thread gets woken up at least once when the linger timer 
> starts ticking so that the IO thread will be ready when the batch is ready to 
> drain.
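
As a rough illustration of the wakeup pattern described above (this is not the
actual raft client code; the class and method names below are invented for the
sketch):

{code:java}
import java.util.concurrent.atomic.AtomicBoolean;

// Illustrative sketch only -- not Kafka's raft internals.
// Point of KAFKA-10826: wake the IO thread when the linger timer STARTS
// (first append into an empty batch), not only once the batch is ready to
// drain, so a thread blocked in poll() re-evaluates its timeout in time.
class LingerAccumulator {
    private final AtomicBoolean lingerStarted = new AtomicBoolean(false);
    private final Object pollLock = new Object();
    private volatile long lingerDeadlineMs = Long.MAX_VALUE;

    void append(byte[] record, long nowMs, long lingerMs) {
        // ... buffer the record ...
        if (lingerStarted.compareAndSet(false, true)) {
            lingerDeadlineMs = nowMs + lingerMs;
            wakeup(); // ensure the IO thread wakes up and sees the new deadline
        }
    }

    void wakeup() {
        synchronized (pollLock) {
            pollLock.notifyAll(); // stand-in for the real client's wakeup call
        }
    }

    // Called by the IO thread: never block past the linger deadline once set.
    long pollTimeoutMs(long nowMs) {
        return Math.max(0L, lingerDeadlineMs - nowMs);
    }
}
{code}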



--
This message was sent by Atlassian Jira
(v8.3.4#803005)


[jira] [Created] (KAFKA-10832) Recovery logic is using incorrect ProducerStateManager instance when updating producers

2020-12-09 Thread Kowshik Prakasam (Jira)
Kowshik Prakasam created KAFKA-10832:


 Summary: Recovery logic is using incorrect ProducerStateManager 
instance when updating producers 
 Key: KAFKA-10832
 URL: https://issues.apache.org/jira/browse/KAFKA-10832
 Project: Kafka
  Issue Type: Bug
Reporter: Kowshik Prakasam
Assignee: Kowshik Prakasam


The bug is that from within {{Log.updateProducers(…)}}, the code operates on 
the {{producerStateManager}} attribute of the {{Log}} instance instead of 
operating on an input parameter. Please see 
[this|https://github.com/apache/kafka/blob/1d84f543678c4c08800bc3ea18c04a9db8adf7e4/core/src/main/scala/kafka/log/Log.scala#L1464]
 LOC where it calls {{producerStateManager.prepareUpdate}} thus accessing the 
attribute from the {{Log}} object (see 
[this|https://github.com/apache/kafka/blob/1d84f543678c4c08800bc3ea18c04a9db8adf7e4/core/src/main/scala/kafka/log/Log.scala#L251]).
 This looks unusual particularly for {{Log.loadProducersFromLog(...)}} 
[path|https://github.com/apache/kafka/blob/1d84f543678c4c08800bc3ea18c04a9db8adf7e4/core/src/main/scala/kafka/log/Log.scala#L956].
 Here I believe we should be using the instance passed to the method, rather 
than the attribute from the {{Log}} instance.
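
A schematic sketch of the bug shape (in Java for brevity; the real code is
Scala in Log.scala, and these class and method signatures are simplified, not
the actual ones):

{code:java}
// Simplified illustration, not the actual kafka.log.Log code.
class Log {
    private final ProducerStateManager producerStateManager = new ProducerStateManager();

    // Buggy shape: ignores the passed-in instance and touches the Log's field.
    void updateProducersBuggy(Batch batch, ProducerStateManager stateManager) {
        producerStateManager.prepareUpdate(batch); // wrong instance during recovery
    }

    // Intended shape: operate on the instance supplied by the caller
    // (e.g. the one built up by loadProducersFromLog during recovery).
    void updateProducersFixed(Batch batch, ProducerStateManager stateManager) {
        stateManager.prepareUpdate(batch);
    }

    static class Batch { }

    static class ProducerStateManager {
        void prepareUpdate(Batch batch) { /* track producer state for this batch */ }
    }
}
{code}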



--
This message was sent by Atlassian Jira
(v8.3.4#803005)


Re: [VOTE] KIP-695: Improve Streams Time Synchronization

2020-12-09 Thread Bruno Cadonna

Thanks for the KIP, John!

+1 (non-binding)

Best,
Bruno

On 08.12.20 18:03, John Roesler wrote:

Hello all,

There hasn't been much discussion on KIP-695 so far, so I'd
like to go ahead and call for a vote.

As a reminder, the purpose of KIP-695 is to improve on the
"task idling" feature we introduced in KIP-353. This KIP
will allow Streams to offer deterministic time semantics in
join-type topologies. For example, it makes sure that
when you join two topics, we collate the topics by
timestamp. That was always the intent with task idling (KIP-
353), but it turns out the previous mechanism couldn't
provide the desired semantics.

The details are here:
https://cwiki.apache.org/confluence/x/JSXZCQ

Thanks,
-John
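
For readers less familiar with the feature, a minimal sketch of the kind of
join-type topology KIP-695 targets, using the existing max.task.idle.ms
setting from KIP-353 (topic names, serdes and the 500 ms value are
illustrative and are not part of the KIP):

{code:java}
import java.time.Duration;
import java.util.Properties;
import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.KafkaStreams;
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.StreamsConfig;
import org.apache.kafka.streams.kstream.JoinWindows;
import org.apache.kafka.streams.kstream.KStream;
import org.apache.kafka.streams.kstream.StreamJoined;

public class OrderPaymentJoin {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(StreamsConfig.APPLICATION_ID_CONFIG, "order-payment-join");
        props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        props.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass());
        props.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.String().getClass());
        // Task idling (KIP-353): wait briefly for lagging input partitions so the
        // two streams can be collated by timestamp; KIP-695 is about making this
        // behaviour deterministic rather than best-effort.
        props.put(StreamsConfig.MAX_TASK_IDLE_MS_CONFIG, 500L);

        StreamsBuilder builder = new StreamsBuilder();
        KStream<String, String> orders = builder.stream("orders");
        KStream<String, String> payments = builder.stream("payments");

        orders.join(payments,
                    (order, payment) -> order + "|" + payment,
                    JoinWindows.of(Duration.ofMinutes(5)),
                    StreamJoined.with(Serdes.String(), Serdes.String(), Serdes.String()))
              .to("orders-with-payments");

        new KafkaStreams(builder.build(), props).start();
    }
}
{code}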



Re: [VOTE] KIP-696: Update Streams FSM to clarify ERROR state meaning

2020-12-09 Thread Leah Thomas
Looks good, thanks Walker! +1 (non-binding)

Leah

On Wed, Dec 9, 2020 at 1:04 PM John Roesler  wrote:

> Thanks, Walker!
>
> I'm also +1 (binding)
>
> -John
>
> On Wed, 2020-12-09 at 11:03 -0800, Guozhang Wang wrote:
> > +1. Thanks Walker.
> >
> > On Wed, Dec 9, 2020 at 10:58 AM Walker Carlson 
> > wrote:
> >
> > > Sorry I forgot to change the subject line to vote.
> > >
> > > Thanks for the comments. If there are no further concerns I would like
> to
> > > call for a vote on KIP-696 to clarify and clean up the Streams State
> > > Machine.
> > >
> > > On Wed, Dec 9, 2020 at 10:04 AM Walker Carlson 
> > > wrote:
> > >
> > > > Thanks for the comments. If there are no further concerns I would
> like to
> > > > call for a vote on KIP-696 to clarify and clean up the Streams State
> > > > Machine.
> > > >
> > > > walker
> > > >
> > > > On Wed, Dec 9, 2020 at 8:50 AM John Roesler 
> wrote:
> > > >
> > > > > Thanks, Walker!
> > > > >
> > > > > Your proposal looks good to me.
> > > > >
> > > > > -John
> > > > >
> > > > > On Tue, 2020-12-08 at 18:29 -0800, Walker Carlson wrote:
> > > > > > Thanks for the feedback Guozhang!
> > > > > >
> > > > > > I clarified some of the points in the Proposed Changes section so
> > > > > hopefully
> > > > > > it will be more clear what is going on now. I also agree with
> your
> > > > > > suggestion about the possible call to close() on ERROR so I
> added this
> > > > > > line.
> > > > > > "Close() called on ERROR will be idempotent and not throw an
> > > exception,
> > > > > but
> > > > > > we will log a warning."
> > > > > >
> > > > > > I have linked those tickets and I will leave a comment trying to
> > > explain
> > > > > > how these changes will affect their issue.
> > > > > >
> > > > > > walker
> > > > > >
> > > > > > On Tue, Dec 8, 2020 at 4:57 PM Guozhang Wang  >
> > > > > wrote:
> > > > > >
> > > > > > > Hello Walker,
> > > > > > >
> > > > > > > Thanks for the KIP! Overall it looks reasonable to me. Just a
> few
> > > > > minor
> > > > > > > comments for the wiki page itself:
> > > > > > >
> > > > > > > 1) Could you clarify the conditions when RUNNING / REBALANCING
> ->
> > > > > > > PENDING_ERROR will happen; and when PENDING_ERROR -> ERROR will
> > > > > happen.
> > > > > > > E.g. when I read "Streams will only reach ERROR state in the
> event
> > > of
> > > > > an
> > > > > > > exceptional failure in which the
> `StreamsUncaughtExceptionHandler`
> > > > > chose to
> > > > > > > either shutdown the application or the client." I thought the
> first
> > > > > > > transition would happen before the handler, and the second
> > > transition
> > > > > would
> > > > > > > happen immediately after the handler returns "shutdown client"
> or
> > > > > "shutdown
> > > > > > > application", until I read the last statement regarding
> > > > > "SHUTDOWN_CLIENT".
> > > > > > >
> > > > > > > 2) A compatibility issue: today it is possible that users
> would call
> > > > > > > Streams APIs like shutdown in the global state transition
> listener.
> > > > > And
> > > > > > > it's common to try shutting down the application automatically
> when
> > > > > > > transiting to ERROR (assuming it was not a terminating state).
> I
> > > > > think we
> > > > > > > could consider making this call a no-op and log a warning.
> > > > > > >
> > > > > > > 3) Could you link the following JIRAs in the "JIRA" field?
> > > > > > >
> > > > > > > https://issues.apache.org/jira/browse/KAFKA-10555
> > > > > > > https://issues.apache.org/jira/browse/KAFKA-9638
> > > > > > > https://issues.apache.org/jira/browse/KAFKA-6520
> > > > > > >
> > > > > > > And maybe we can also left a comment on those tickets
> explaining
> > > what
> > > > > would
> > > > > > > happen to tackle the issues after this KIP.
> > > > > > >
> > > > > > >
> > > > > > > Guozhang
> > > > > > >
> > > > > > >
> > > > > > > On Tue, Dec 8, 2020 at 12:16 PM Walker Carlson <
> > > wcarl...@confluent.io
> > > > > >
> > > > > > > wrote:
> > > > > > >
> > > > > > > > Hello all,
> > > > > > > >
> > > > > > > > I'd like to propose KIP-696 to clarify the meaning of ERROR
> state
> > > > > in the
> > > > > > > > KafkaStreams Client State Machine. This will update the
> States to
> > > be
> > > > > > > > consistent with changes in KIP-671 and KIP-663.
> > > > > > > >
> > > > > > > > Here are the details:
> > > https://cwiki.apache.org/confluence/x/lCvZCQ
> > > > > > > >
> > > > > > > > Thanks,
> > > > > > > > Walker
> > > > > > > >
> > > > > > >
> > > > > > >
> > > > > > > --
> > > > > > > -- Guozhang
> > > > > > >
> > > > >
> > > > >
> > > > >
> > >
> >
> >
>
>
>


Re: [VOTE] KIP-696: Update Streams FSM to clarify ERROR state meaning

2020-12-09 Thread John Roesler
Thanks, Walker!

I'm also +1 (binding)

-John

On Wed, 2020-12-09 at 11:03 -0800, Guozhang Wang wrote:
> +1. Thanks Walker.
> 
> On Wed, Dec 9, 2020 at 10:58 AM Walker Carlson 
> wrote:
> 
> > Sorry I forgot to change the subject line to vote.
> > 
> > Thanks for the comments. If there are no further concerns I would like to
> > call for a vote on KIP-696 to clarify and clean up the Streams State
> > Machine.
> > 
> > On Wed, Dec 9, 2020 at 10:04 AM Walker Carlson 
> > wrote:
> > 
> > > Thanks for the comments. If there are no further concerns I would like to
> > > call for a vote on KIP-696 to clarify and clean up the Streams State
> > > Machine.
> > > 
> > > walker
> > > 
> > > On Wed, Dec 9, 2020 at 8:50 AM John Roesler  wrote:
> > > 
> > > > Thanks, Walker!
> > > > 
> > > > Your proposal looks good to me.
> > > > 
> > > > -John
> > > > 
> > > > On Tue, 2020-12-08 at 18:29 -0800, Walker Carlson wrote:
> > > > > Thanks for the feedback Guozhang!
> > > > > 
> > > > > I clarified some of the points in the Proposed Changes section so
> > > > hopefully
> > > > > it will be more clear what is going on now. I also agree with your
> > > > > suggestion about the possible call to close() on ERROR so I added this
> > > > > line.
> > > > > "Close() called on ERROR will be idempotent and not throw an
> > exception,
> > > > but
> > > > > we will log a warning."
> > > > > 
> > > > > I have linked those tickets and I will leave a comment trying to
> > explain
> > > > > how these changes will affect their issue.
> > > > > 
> > > > > walker
> > > > > 
> > > > > On Tue, Dec 8, 2020 at 4:57 PM Guozhang Wang 
> > > > wrote:
> > > > > 
> > > > > > Hello Walker,
> > > > > > 
> > > > > > Thanks for the KIP! Overall it looks reasonable to me. Just a few
> > > > minor
> > > > > > comments for the wiki page itself:
> > > > > > 
> > > > > > 1) Could you clarify the conditions when RUNNING / REBALANCING ->
> > > > > > PENDING_ERROR will happen; and when PENDING_ERROR -> ERROR will
> > > > happen.
> > > > > > E.g. when I read "Streams will only reach ERROR state in the event
> > of
> > > > an
> > > > > > exceptional failure in which the `StreamsUncaughtExceptionHandler`
> > > > chose to
> > > > > > either shutdown the application or the client." I thought the first
> > > > > > transition would happen before the handler, and the second
> > transition
> > > > would
> > > > > > happen immediately after the handler returns "shutdown client" or
> > > > "shutdown
> > > > > > application", until I read the last statement regarding
> > > > "SHUTDOWN_CLIENT".
> > > > > > 
> > > > > > 2) A compatibility issue: today it is possible that users would call
> > > > > > Streams APIs like shutdown in the global state transition listener.
> > > > And
> > > > > > it's common to try shutting down the application automatically when
> > > > > > transiting to ERROR (assuming it was not a terminating state). I
> > > > think we
> > > > > > could consider making this call a no-op and log a warning.
> > > > > > 
> > > > > > 3) Could you link the following JIRAs in the "JIRA" field?
> > > > > > 
> > > > > > https://issues.apache.org/jira/browse/KAFKA-10555
> > > > > > https://issues.apache.org/jira/browse/KAFKA-9638
> > > > > > https://issues.apache.org/jira/browse/KAFKA-6520
> > > > > > 
> > > > > > And maybe we can also left a comment on those tickets explaining
> > what
> > > > would
> > > > > > happen to tackle the issues after this KIP.
> > > > > > 
> > > > > > 
> > > > > > Guozhang
> > > > > > 
> > > > > > 
> > > > > > On Tue, Dec 8, 2020 at 12:16 PM Walker Carlson <
> > wcarl...@confluent.io
> > > > > 
> > > > > > wrote:
> > > > > > 
> > > > > > > Hello all,
> > > > > > > 
> > > > > > > I'd like to propose KIP-696 to clarify the meaning of ERROR state
> > > > in the
> > > > > > > KafkaStreams Client State Machine. This will update the States to
> > be
> > > > > > > consistent with changes in KIP-671 and KIP-663.
> > > > > > > 
> > > > > > > Here are the details:
> > https://cwiki.apache.org/confluence/x/lCvZCQ
> > > > > > > 
> > > > > > > Thanks,
> > > > > > > Walker
> > > > > > > 
> > > > > > 
> > > > > > 
> > > > > > --
> > > > > > -- Guozhang
> > > > > > 
> > > > 
> > > > 
> > > > 
> > 
> 
> 
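
For context on the compatibility point and the close()-on-ERROR wording
discussed in the thread above, here is a minimal sketch of a state listener
that reacts to ERROR. It uses the existing public KafkaStreams API; the
"idempotent no-op that logs a warning" behaviour of close() in ERROR is what
the KIP proposes, not necessarily today's behaviour.

{code:java}
import java.time.Duration;
import org.apache.kafka.streams.KafkaStreams;

public class ErrorStateHandling {
    public static void attach(KafkaStreams streams) {
        streams.setStateListener((newState, oldState) -> {
            if (newState == KafkaStreams.State.ERROR) {
                // Today some applications react to ERROR by shutting the client
                // down themselves. Under KIP-696, ERROR means the client has
                // already been shut down by the StreamsUncaughtExceptionHandler
                // decision, so this close() is expected to be an idempotent
                // no-op that only logs a warning.
                streams.close(Duration.ofSeconds(30));
            }
        });
    }
}
{code}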




Re: [VOTE] KIP-696: Update Streams FSM to clarify ERROR state meaning

2020-12-09 Thread Guozhang Wang
+1. Thanks Walker.

On Wed, Dec 9, 2020 at 10:58 AM Walker Carlson 
wrote:

> Sorry I forgot to change the subject line to vote.
>
> Thanks for the comments. If there are no further concerns I would like to
> call for a vote on KIP-696 to clarify and clean up the Streams State
> Machine.
>
> On Wed, Dec 9, 2020 at 10:04 AM Walker Carlson 
> wrote:
>
> > Thanks for the comments. If there are no further concerns I would like to
> > call for a vote on KIP-696 to clarify and clean up the Streams State
> > Machine.
> >
> > walker
> >
> > On Wed, Dec 9, 2020 at 8:50 AM John Roesler  wrote:
> >
> >> Thanks, Walker!
> >>
> >> Your proposal looks good to me.
> >>
> >> -John
> >>
> >> On Tue, 2020-12-08 at 18:29 -0800, Walker Carlson wrote:
> >> > Thanks for the feedback Guozhang!
> >> >
> >> > I clarified some of the points in the Proposed Changes section so
> >> hopefully
> >> > it will be more clear what is going on now. I also agree with your
> >> > suggestion about the possible call to close() on ERROR so I added this
> >> > line.
> >> > "Close() called on ERROR will be idempotent and not throw an
> exception,
> >> but
> >> > we will log a warning."
> >> >
> >> > I have linked those tickets and I will leave a comment trying to
> explain
> >> > how these changes will affect their issue.
> >> >
> >> > walker
> >> >
> >> > On Tue, Dec 8, 2020 at 4:57 PM Guozhang Wang 
> >> wrote:
> >> >
> >> > > Hello Walker,
> >> > >
> >> > > Thanks for the KIP! Overall it looks reasonable to me. Just a few
> >> minor
> >> > > comments for the wiki page itself:
> >> > >
> >> > > 1) Could you clarify the conditions when RUNNING / REBALANCING ->
> >> > > PENDING_ERROR will happen; and when PENDING_ERROR -> ERROR will
> >> happen.
> >> > > E.g. when I read "Streams will only reach ERROR state in the event
> of
> >> an
> >> > > exceptional failure in which the `StreamsUncaughtExceptionHandler`
> >> chose to
> >> > > either shutdown the application or the client." I thought the first
> >> > > transition would happen before the handler, and the second
> transition
> >> would
> >> > > happen immediately after the handler returns "shutdown client" or
> >> "shutdown
> >> > > application", until I read the last statement regarding
> >> "SHUTDOWN_CLIENT".
> >> > >
> >> > > 2) A compatibility issue: today it is possible that users would call
> >> > > Streams APIs like shutdown in the global state transition listener.
> >> And
> >> > > it's common to try shutting down the application automatically when
> >> > > transiting to ERROR (assuming it was not a terminating state). I
> >> think we
> >> > > could consider making this call a no-op and log a warning.
> >> > >
> >> > > 3) Could you link the following JIRAs in the "JIRA" field?
> >> > >
> >> > > https://issues.apache.org/jira/browse/KAFKA-10555
> >> > > https://issues.apache.org/jira/browse/KAFKA-9638
> >> > > https://issues.apache.org/jira/browse/KAFKA-6520
> >> > >
> >> > > And maybe we can also left a comment on those tickets explaining
> what
> >> would
> >> > > happen to tackle the issues after this KIP.
> >> > >
> >> > >
> >> > > Guozhang
> >> > >
> >> > >
> >> > > On Tue, Dec 8, 2020 at 12:16 PM Walker Carlson <
> wcarl...@confluent.io
> >> >
> >> > > wrote:
> >> > >
> >> > > > Hello all,
> >> > > >
> >> > > > I'd like to propose KIP-696 to clarify the meaning of ERROR state
> >> in the
> >> > > > KafkaStreams Client State Machine. This will update the States to
> be
> >> > > > consistent with changes in KIP-671 and KIP-663.
> >> > > >
> >> > > > Here are the details:
> https://cwiki.apache.org/confluence/x/lCvZCQ
> >> > > >
> >> > > > Thanks,
> >> > > > Walker
> >> > > >
> >> > >
> >> > >
> >> > > --
> >> > > -- Guozhang
> >> > >
> >>
> >>
> >>
>


-- 
-- Guozhang


Re: [VOTE] KIP-696: Update Streams FSM to clarify ERROR state meaning

2020-12-09 Thread Walker Carlson
Sorry I forgot to change the subject line to vote.

Thanks for the comments. If there are no further concerns I would like to
call for a vote on KIP-696 to clarify and clean up the Streams State
Machine.

On Wed, Dec 9, 2020 at 10:04 AM Walker Carlson 
wrote:

> Thanks for the comments. If there are no further concerns I would like to
> call for a vote on KIP-696 to clarify and clean up the Streams State
> Machine.
>
> walker
>
> On Wed, Dec 9, 2020 at 8:50 AM John Roesler  wrote:
>
>> Thanks, Walker!
>>
>> Your proposal looks good to me.
>>
>> -John
>>
>> On Tue, 2020-12-08 at 18:29 -0800, Walker Carlson wrote:
>> > Thanks for the feedback Guozhang!
>> >
>> > I clarified some of the points in the Proposed Changes section so
>> hopefully
>> > it will be more clear what is going on now. I also agree with your
>> > suggestion about the possible call to close() on ERROR so I added this
>> > line.
>> > "Close() called on ERROR will be idempotent and not throw an exception,
>> but
>> > we will log a warning."
>> >
>> > I have linked those tickets and I will leave a comment trying to explain
>> > how these changes will affect their issue.
>> >
>> > walker
>> >
>> > On Tue, Dec 8, 2020 at 4:57 PM Guozhang Wang 
>> wrote:
>> >
>> > > Hello Walker,
>> > >
>> > > Thanks for the KIP! Overall it looks reasonable to me. Just a few
>> minor
>> > > comments for the wiki page itself:
>> > >
>> > > 1) Could you clarify the conditions when RUNNING / REBALANCING ->
>> > > PENDING_ERROR will happen; and when PENDING_ERROR -> ERROR will
>> happen.
>> > > E.g. when I read "Streams will only reach ERROR state in the event of
>> an
>> > > exceptional failure in which the `StreamsUncaughtExceptionHandler`
>> chose to
>> > > either shutdown the application or the client." I thought the first
>> > > transition would happen before the handler, and the second transition
>> would
>> > > happen immediately after the handler returns "shutdown client" or
>> "shutdown
>> > > application", until I read the last statement regarding
>> "SHUTDOWN_CLIENT".
>> > >
>> > > 2) A compatibility issue: today it is possible that users would call
>> > > Streams APIs like shutdown in the global state transition listener.
>> And
>> > > it's common to try shutting down the application automatically when
>> > > transiting to ERROR (assuming it was not a terminating state). I
>> think we
>> > > could consider making this call a no-op and log a warning.
>> > >
>> > > 3) Could you link the following JIRAs in the "JIRA" field?
>> > >
>> > > https://issues.apache.org/jira/browse/KAFKA-10555
>> > > https://issues.apache.org/jira/browse/KAFKA-9638
>> > > https://issues.apache.org/jira/browse/KAFKA-6520
>> > >
>> > > And maybe we can also left a comment on those tickets explaining what
>> would
>> > > happen to tackle the issues after this KIP.
>> > >
>> > >
>> > > Guozhang
>> > >
>> > >
>> > > On Tue, Dec 8, 2020 at 12:16 PM Walker Carlson > >
>> > > wrote:
>> > >
>> > > > Hello all,
>> > > >
>> > > > I'd like to propose KIP-696 to clarify the meaning of ERROR state
>> in the
>> > > > KafkaStreams Client State Machine. This will update the States to be
>> > > > consistent with changes in KIP-671 and KIP-663.
>> > > >
>> > > > Here are the details: https://cwiki.apache.org/confluence/x/lCvZCQ
>> > > >
>> > > > Thanks,
>> > > > Walker
>> > > >
>> > >
>> > >
>> > > --
>> > > -- Guozhang
>> > >
>>
>>
>>


Jenkins build is back to normal : Kafka » kafka-trunk-jdk15 #319

2020-12-09 Thread Apache Jenkins Server
See 




[jira] [Reopened] (KAFKA-10713) Surprising behaviour when bootstrap servers are separated by semicolons

2020-12-09 Thread Tom Bentley (Jira)


 [ 
https://issues.apache.org/jira/browse/KAFKA-10713?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel
 ]

Tom Bentley reopened KAFKA-10713:
-

> Surprising behaviour when bootstrap servers are separated by semicolons
> ---
>
> Key: KAFKA-10713
> URL: https://issues.apache.org/jira/browse/KAFKA-10713
> Project: Kafka
>  Issue Type: Improvement
>Reporter: Mickael Maison
>Assignee: Tom Bentley
>Priority: Major
> Fix For: 2.8.0
>
>
> When creating a Kafka client with {{bootstrap.servers}} set to 
> "kafka-0:9092;kafka-1:9092;kafka-2:9092", it has a strange behaviour.
> For one, there are no warning or error messages. The client will connect and 
> start working. However, it will only use the hostname after the last 
> semicolon as bootstrap server!
> The configuration {{bootstrap.servers}} is defined as a {{List}} in 
> {{AbstractConfig}}. So from a configuration point of view, 
> "kafka-0:9092;kafka-1:9092;kafka-2:9092" is a single entry.
> Then, {{Utils.getHost()}} returns "kafka-2" when parsing that string.
> {code:java}
> assertEquals("kafka-2", getHost("kafka-1:9092;kafka-1:9092;kafka-2:9092"));
> {code}
> So the client ends up with a single bootstrap server! 
> I believe semicolons are not valid characters in hostname/domain names, so we 
> should be able to provide better validation.
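
A rough sketch of the stricter validation suggested here (not Kafka's actual
ConfigDef validator; the regex is simplified and, for example, does not handle
bracketed IPv6 literals):

{code:java}
import java.util.Arrays;
import java.util.List;

// Hypothetical helper: bootstrap.servers is comma-separated, so a value
// containing ';' is almost certainly a mistake and should fail fast instead
// of silently resolving to the host after the last semicolon.
final class BootstrapServersValidator {
    static List<String> parse(String bootstrapServers) {
        List<String> entries = Arrays.asList(bootstrapServers.split(","));
        for (String entry : entries) {
            // host must not contain ';' and the entry must end with :<port>
            if (!entry.trim().matches("[^:;,\\s]+:\\d+")) {
                throw new IllegalArgumentException("Invalid bootstrap server entry: " + entry);
            }
        }
        return entries;
    }

    public static void main(String[] args) {
        parse("kafka-0:9092,kafka-1:9092");                 // ok
        parse("kafka-0:9092;kafka-1:9092;kafka-2:9092");    // throws: ';' in hostname
    }
}
{code}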



--
This message was sent by Atlassian Jira
(v8.3.4#803005)


[DISCUSS] KIP-697: Stricter parsing of addresses in configs

2020-12-09 Thread Tom Bentley
Hi,

I'd like to start a discussion on a small KIP which proposes stricter
parsing of host:port addresses in various configs for Kafka 3.0:

https://cwiki.apache.org/confluence/display/KAFKA/KIP-697%3A+Stricter+parsing+of+addresses+in+configs

I'd be grateful for any feedback people may have.

Kind regards,

Tom


Re: [DISCUSS] KIP-631: The Quorum-based Kafka Controller

2020-12-09 Thread Jun Rao
Hi, Colin,

Thanks for the update. A few more follow up comments.

100. FailedReplicaRecord: Since this is reported by each broker
independently, perhaps we could use a more concise representation that has
a top-level broker field and an array of topics, each of which has an array
of partitions.

200. Sounds good. If we remove the broker-side fencing logic, do we plan to
still keep FENCED in broker state? Do we plan to expose the new states
through the existing BrokerState metric and if so, what are the values for
the new states?

201. This may be fine too. Could we document what happens when the
broker.id/controller.id in meta.properties don't match the broker
config when the broker starts up?

204. There is still "The highest metadata offset which the broker has not
reached" referenced under BrokerRegistration.

206. Is that separate step needed given KIP-516? With KIP-516 (
https://cwiki.apache.org/confluence/display/KAFKA/KIP-516%3A+Topic+Identifiers#KIP516:TopicIdentifiers-LeaderAndIsr),
we don't need to wait for the topic data to be removed from all brokers
before removing the topic metadata. A mismatched topicId or a missing
topicId in the metadata is enough for the broker to clean up deleted topics
asynchronously.

Jun




On Tue, Dec 8, 2020 at 5:27 PM Colin McCabe  wrote:

> On Thu, Dec 3, 2020, at 16:37, Jun Rao wrote:
> > Hi, Colin,
> >
> > Thanks for the updated KIP. A few more comments below.
> >
>
> Hi Jun,
>
> Thanks again for the reviews.
>
> > 80.2 For deprecated configs, we need to include zookeeper.* and
> > broker.id.generation.enable.
> >
>
> Added.
>
> > 83.1 If a broker is down, does the controller keep the previously
> > registered broker epoch forever? If not, how long does the controller
> keep
> > it? What does the controller do when receiving a broker heartbeat request
> > with an unfound broker epoch?
> >
>
> Yes, the controller keeps the previous registration forever.
>
> Broker heartbeat requests with an incorrect broker epoch will be rejected
> with STALE_BROKER_EPOCH.
>
> > 100. Have you figured out if we need to add a new record type for
> reporting
> > partitions on failed disks?
> >
>
> I added FailedReplicaRecord to reflect the case where a JBOD directory has
> failed, leading to failed replicas.
>
> > 102. For debugging purposes, sometimes it's useful to read the metadata
> > topic using tools like console-consumer. Should we support that and if
> so,
> > how?
> >
>
> For now, we have the ability to read the metadata logs with the dump-logs
> tool.  I think we will come up with some other tools in the future as we
> get experience.
>
> > 200. "brokers which are fenced will not appear in MetadataResponses. The
> > broker will not respond to these requests-- instead, it will simply
> > disconnect." If the controller is partitioned off from the brokers, this
> > design will cause every broker to stop accepting new client requests. In
> > contrast, if ZK is partitioned off, the existing behavior is that the
> > brokers can continue to work based on the last known metadata. So, I am
> not
> > sure if we should change the existing behavior because of the bigger
> impact
> > in the new one. Another option is to keep the existing behavior and
> expose
> > a metric for fenced brokers so that the operator could be alerted.
> >
>
> I'm skeptical about how well running without ZK currently works.  However,
> I will move the broker-side fencing into a follow-up KIP.  This KIP is
> already pretty large and there is no hard dependency on this.  There may
> also be other ways of accomplishing the positive effects of broker-side
> fencing, so more discussion is needed.
>
> > 201. I read Ron's comment, but I am still not sure of the benefit of
> > keeping broker.id and controller.id in meta.properties. It seems that we
> > are just duplicating the same info in two places and have the additional
> > burden of making sure the values in the two places are consistent.
> >
>
> I think the reasoning is that having broker.id protects us against
> accidentally bringing up a broker with a disk from a different broker.  I
> don't feel strongly about this but it seemed simpler to keep it.
>
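For illustration, a hypothetical sketch of the startup check this protection
implies (illustrative names, not the actual server code):

    // Hypothetical startup validation; not the actual Kafka server code.
    final class MetaPropertiesCheck {
        static void validateBrokerId(int configuredBrokerId, Integer brokerIdInMetaProperties) {
            if (brokerIdInMetaProperties != null
                    && brokerIdInMetaProperties != configuredBrokerId) {
                throw new IllegalStateException(
                    "broker.id in meta.properties (" + brokerIdInMetaProperties
                        + ") does not match broker.id in the config ("
                        + configuredBrokerId + "); this log directory may "
                        + "belong to a different broker.");
            }
        }
    }
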
> > 202. controller.connect.security.protocol: Is this needed since
> > controller.listener.names and listener.security.protocol.map imply the
> > security protocol already?
> >
>
> You're right, this isn't needed.  I'll remove it.
>
> > 203. registration.heartbeat.interval.ms: It defaults to 2k. ZK uses 1/3
> > of the session timeout for heartbeat. So, given the default 18k for
> > registration.lease.timeout.ms, should we default
> > registration.heartbeat.interval.ms to 6k?
> >
>
> 6 seconds seems like a pretty long time between heartbeats.  It might be
> useful to detect that a broker is missing heartbeats sooner than that.  I
> provisionally set it to 3 seconds (we can always change it later...)
>
> I also changed the name of these configurations to "
> 

Re: [DISCUSS] KIP-696: Update Streams FSM to clarify ERROR state meaning

2020-12-09 Thread Walker Carlson
Thanks for the comments. If there are no further concerns I would like to
call for a vote on KIP-696 to clarify and clean up the Streams State
Machine.
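
For orientation, a rough sketch of the ERROR-related transitions discussed in
the quoted messages below (RUNNING/REBALANCING -> PENDING_ERROR -> ERROR);
this is hypothetical and not the actual KafkaStreams.State code:

    // Hypothetical sketch modelling only the ERROR-related transitions
    // discussed in this thread, not the full KafkaStreams state machine.
    enum StreamsStateSketch {
        RUNNING, REBALANCING, PENDING_ERROR, ERROR;

        boolean canTransitionTo(StreamsStateSketch next) {
            switch (this) {
                case RUNNING:
                case REBALANCING:
                    // The StreamsUncaughtExceptionHandler chose to shut down
                    // the client or the application.
                    return next == PENDING_ERROR;
                case PENDING_ERROR:
                    // The shutdown has completed.
                    return next == ERROR;
                default:
                    // ERROR is terminal in this sketch.
                    return false;
            }
        }
    }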

walker

On Wed, Dec 9, 2020 at 8:50 AM John Roesler  wrote:

> Thanks, Walker!
>
> Your proposal looks good to me.
>
> -John
>
> On Tue, 2020-12-08 at 18:29 -0800, Walker Carlson wrote:
> > Thanks for the feedback Guozhang!
> >
> > I clarified some of the points in the Proposed Changes section so
> > hopefully it will be clearer what is going on now. I also agree with your
> > suggestion about the possible call to close() on ERROR so I added this
> > line.
> > "Close() called on ERROR will be idempotent and not throw an exception,
> but
> > we will log a warning."
> >
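For illustration, a hypothetical sketch of the behavior described above; this
is not the actual KafkaStreams implementation:

    // Hypothetical sketch: close() while already in ERROR logs a warning and
    // returns instead of throwing.
    final class IdempotentCloseSketch {
        enum State { RUNNING, ERROR, NOT_RUNNING }

        private State state = State.ERROR;

        synchronized void close() {
            if (state == State.ERROR) {
                System.err.println("close() called while in ERROR state; ignoring.");
                return; // idempotent: no exception is thrown
            }
            // ... normal shutdown path ...
            state = State.NOT_RUNNING;
        }
    }
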
> > I have linked those tickets and I will leave a comment trying to explain
> > how these changes will affect their issue.
> >
> > walker
> >
> > On Tue, Dec 8, 2020 at 4:57 PM Guozhang Wang  wrote:
> >
> > > Hello Walker,
> > >
> > > Thanks for the KIP! Overall it looks reasonable to me. Just a few minor
> > > comments for the wiki page itself:
> > >
> > > 1) Could you clarify the conditions when RUNNING / REBALANCING ->
> > > PENDING_ERROR will happen; and when PENDING_ERROR -> ERROR will happen.
> > > E.g. when I read "Streams will only reach ERROR state in the event of an
> > > exceptional failure in which the `StreamsUncaughtExceptionHandler` chose
> > > to either shutdown the application or the client." I thought the first
> > > transition would happen before the handler, and the second transition
> > > would happen immediately after the handler returns "shutdown client" or
> > > "shutdown application", until I read the last statement regarding
> > > "SHUTDOWN_CLIENT".
> > >
> > > 2) A compatibility issue: today it is possible that users would call
> > > Streams APIs like shutdown in the global state transition listener. And
> > > it's common to try shutting down the application automatically when
> > > transitioning to ERROR (assuming it was not a terminating state). I think
> > > we could consider making this call a no-op and logging a warning.
> > >
> > > 3) Could you link the following JIRAs in the "JIRA" field?
> > >
> > > https://issues.apache.org/jira/browse/KAFKA-10555
> > > https://issues.apache.org/jira/browse/KAFKA-9638
> > > https://issues.apache.org/jira/browse/KAFKA-6520
> > >
> > > And maybe we can also leave a comment on those tickets explaining what
> > > would happen to tackle the issues after this KIP.
> > >
> > >
> > > Guozhang
> > >
> > >
> > > On Tue, Dec 8, 2020 at 12:16 PM Walker Carlson 
> > > wrote:
> > >
> > > > Hello all,
> > > >
> > > > I'd like to propose KIP-696 to clarify the meaning of ERROR state in
> > > > the KafkaStreams Client State Machine. This will update the States to be
> > > > consistent with changes in KIP-671 and KIP-663.
> > > >
> > > > Here are the details: https://cwiki.apache.org/confluence/x/lCvZCQ
> > > >
> > > > Thanks,
> > > > Walker
> > > >
> > >
> > >
> > > --
> > > -- Guozhang
> > >
>
>
>


Re: [DISCUSS] KIP-696: Update Streams FSM to clarify ERROR state meaning

2020-12-09 Thread John Roesler
Thanks, Walker!

Your proposal looks good to me.

-John

On Tue, 2020-12-08 at 18:29 -0800, Walker Carlson wrote:
> Thanks for the feedback Guozhang!
> 
> I clarified some of the points in the Proposed Changes section so hopefully
> it will be clearer what is going on now. I also agree with your
> suggestion about the possible call to close() on ERROR so I added this
> line.
> "Close() called on ERROR will be idempotent and not throw an exception, but
> we will log a warning."
> 
> I have linked those tickets and I will leave a comment trying to explain
> how these changes will affect their issue.
> 
> walker
> 
> On Tue, Dec 8, 2020 at 4:57 PM Guozhang Wang  wrote:
> 
> > Hello Walker,
> > 
> > Thanks for the KIP! Overall it looks reasonable to me. Just a few minor
> > comments for the wiki page itself:
> > 
> > 1) Could you clarify the conditions when RUNNING / REBALANCING ->
> > PENDING_ERROR will happen; and when PENDING_ERROR -> ERROR will happen.
> > E.g. when I read "Streams will only reach ERROR state in the event of an
> > exceptional failure in which the `StreamsUncaughtExceptionHandler` chose to
> > either shutdown the application or the client." I thought the first
> > transition would happen before the handler, and the second transition would
> > happen immediately after the handler returns "shutdown client" or "shutdown
> > application", until I read the last statement regarding "SHUTDOWN_CLIENT".
> > 
> > 2) A compatibility issue: today it is possible that users would call
> > Streams APIs like shutdown in the global state transition listener. And
> > it's common to try shutting down the application automatically when
> > transitioning to ERROR (assuming it was not a terminating state). I think we
> > could consider making this call a no-op and logging a warning.
> > 
> > 3) Could you link the following JIRAs in the "JIRA" field?
> > 
> > https://issues.apache.org/jira/browse/KAFKA-10555
> > https://issues.apache.org/jira/browse/KAFKA-9638
> > https://issues.apache.org/jira/browse/KAFKA-6520
> > 
> > And maybe we can also leave a comment on those tickets explaining what would
> > happen to tackle the issues after this KIP.
> > 
> > 
> > Guozhang
> > 
> > 
> > On Tue, Dec 8, 2020 at 12:16 PM Walker Carlson 
> > wrote:
> > 
> > > Hello all,
> > > 
> > > I'd like to propose KIP-696 to clarify the meaning of ERROR state in the
> > > KafkaStreams Client State Machine. This will update the States to be
> > > consistent with changes in KIP-671 and KIP-663.
> > > 
> > > Here are the details: https://cwiki.apache.org/confluence/x/lCvZCQ
> > > 
> > > Thanks,
> > > Walker
> > > 
> > 
> > 
> > --
> > -- Guozhang
> > 




[jira] [Resolved] (KAFKA-10606) Auto create non-existent topics when fetching metadata for all topics

2020-12-09 Thread Chia-Ping Tsai (Jira)


 [ 
https://issues.apache.org/jira/browse/KAFKA-10606?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel
 ]

Chia-Ping Tsai resolved KAFKA-10606.

Fix Version/s: 2.7.0
   Resolution: Fixed

> Auto create non-existent topics when fetching metadata for all topics
> -
>
> Key: KAFKA-10606
> URL: https://issues.apache.org/jira/browse/KAFKA-10606
> Project: Kafka
>  Issue Type: Bug
>Reporter: Lincong Li
>Assignee: Lincong Li
>Priority: Major
> Fix For: 2.7.0
>
>
> The "allow auto topic creation" flag is hardcoded to be true for the 
> fetch-all-topic metadata request:
> https://github.com/apache/kafka/blob/trunk/clients/src/main/java/org/apache/kafka/common/requests/MetadataRequest.java#L37
> In the code below, the annotation claims that "*This never causes
> auto-creation*". That is NOT true, and auto topic creation still gets triggered
> under some circumstances. So, this is a bug that needs to be fixed.
> https://github.com/apache/kafka/blob/trunk/clients/src/main/java/org/apache/kafka/common/requests/MetadataRequest.java#L68
> For example, the bug could be manifested in the below situation:
> A topic T is being deleted and a request to fetch metadata for all topics 
> gets sent to one broker. The broker reads names of all topics from its 
> metadata cache (shown below).
> https://github.com/apache/kafka/blob/trunk/core/src/main/scala/kafka/server/KafkaApis.scala#L1196
> Then the broker authorizes all topics and makes sure that they are allowed to 
> be described. Then the broker tries to get metadata for every authorized 
> topic by reading the metadata cache again, once for every topic (shown below).
> https://github.com/apache/kafka/blob/trunk/core/src/main/scala/kafka/server/KafkaApis.scala#L1240
> However, the metadata cache could have been updated while the broker was
> authorizing all topics: topic T got deleted, the metadata update requests
> eventually got propagated from the controller to all brokers, and topic T and
> its metadata no longer exist in the cache. So, at this point, when the broker
> tries to get metadata for topic T from its cache, it realizes that the topic
> does not exist, and the broker tries to "auto create" topic T since the
> allow-auto-topic-creation flag was set to true in all the fetch-all-topic
> metadata requests.
> I think this bug has existed since "*metadataRequest.allowAutoTopicCreation*" was
> introduced.
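
For illustration, a minimal sketch of the race described above; all names here
are hypothetical and do not correspond to the actual KafkaApis code:

    import java.util.List;
    import java.util.Set;
    import java.util.stream.Collectors;

    // Hypothetical sketch of the deletion/auto-create race described in this
    // ticket.
    final class AutoCreateRaceSketch {

        interface MetadataCache {
            Set<String> allTopicNames();
            boolean contains(String topic);
        }

        static List<String> topicsThatWouldBeAutoCreated(MetadataCache cache,
                                                         boolean allowAutoTopicCreation) {
            // 1. Snapshot all topic names; the topic being deleted is still present.
            Set<String> snapshot = cache.allTopicNames();
            // 2. Authorization runs; meanwhile the deletion propagates and the
            //    cache is updated, so the per-topic lookup below misses.
            // 3. Because the all-topics request hardcodes allowAutoTopicCreation
            //    to true, the missing topic would be re-created unintentionally.
            return snapshot.stream()
                    .filter(topic -> allowAutoTopicCreation && !cache.contains(topic))
                    .collect(Collectors.toList());
        }
    }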



--
This message was sent by Atlassian Jira
(v8.3.4#803005)


Jenkins build is back to normal : Kafka » kafka-trunk-jdk8 #272

2020-12-09 Thread Apache Jenkins Server
See 




Jenkins build is back to normal : Kafka » kafka-trunk-jdk11 #297

2020-12-09 Thread Apache Jenkins Server
See 




Build failed in Jenkins: Kafka » kafka-trunk-jdk15 #318

2020-12-09 Thread Apache Jenkins Server
See 


Changes:

[github] MINOR: Clean up streams metric sensors (#9696)


--
[...truncated 6.98 MB...]

org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.WindowStoreBuilder@25ebf053, 
timestamped = false, caching = false, logging = true] STARTED

org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.WindowStoreBuilder@25ebf053, 
timestamped = false, caching = false, logging = true] PASSED

org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.WindowStoreBuilder@7bef19eb, 
timestamped = false, caching = false, logging = true] STARTED

org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.WindowStoreBuilder@7bef19eb, 
timestamped = false, caching = false, logging = true] PASSED

org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.WindowStoreBuilder@3648fa5, 
timestamped = false, caching = false, logging = false] STARTED

org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.WindowStoreBuilder@3648fa5, 
timestamped = false, caching = false, logging = false] PASSED

org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.WindowStoreBuilder@728a4406, 
timestamped = false, caching = false, logging = false] STARTED

org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.WindowStoreBuilder@728a4406, 
timestamped = false, caching = false, logging = false] PASSED

org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.WindowStoreBuilder@8195b6c, 
timestamped = false, caching = false, logging = false] STARTED

org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.WindowStoreBuilder@8195b6c, 
timestamped = false, caching = false, logging = false] PASSED

org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.SessionStoreBuilder@bba3bc5, 
timestamped = false, caching = true, logging = true] STARTED

org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.SessionStoreBuilder@bba3bc5, 
timestamped = false, caching = true, logging = true] PASSED

org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.SessionStoreBuilder@2c8494b0, 
timestamped = false, caching = true, logging = true] STARTED

org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.SessionStoreBuilder@2c8494b0, 
timestamped = false, caching = true, logging = true] PASSED

org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.SessionStoreBuilder@c8e5f52, 
timestamped = false, caching = true, logging = false] STARTED

org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.SessionStoreBuilder@c8e5f52, 
timestamped = false, caching = true, logging = false] PASSED

org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.SessionStoreBuilder@4e8ea794, 
timestamped = false, caching = true, logging = false] STARTED

org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.SessionStoreBuilder@4e8ea794, 
timestamped = false, caching = true, logging = false] PASSED

org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.SessionStoreBuilder@14b0c993, 
timestamped = false, caching = false, logging = true] STARTED

org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.SessionStoreBuilder@14b0c993, 
timestamped = false, caching = false, logging = true] PASSED


Build failed in Jenkins: Kafka » kafka-trunk-jdk11 #296

2020-12-09 Thread Apache Jenkins Server
See 


Changes:

[github] MINOR: Using primitive data types for loop index (#9705)


--
[...truncated 3.49 MB...]

org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.WindowStoreBuilder@5da37dbd, 
timestamped = false, caching = false, logging = true] PASSED

org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.WindowStoreBuilder@412cf9fb, 
timestamped = false, caching = false, logging = true] STARTED

org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.WindowStoreBuilder@412cf9fb, 
timestamped = false, caching = false, logging = true] PASSED

org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.WindowStoreBuilder@3ed2fbb0, 
timestamped = false, caching = false, logging = false] STARTED

org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.WindowStoreBuilder@3ed2fbb0, 
timestamped = false, caching = false, logging = false] PASSED

org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.WindowStoreBuilder@1dd6aa17, 
timestamped = false, caching = false, logging = false] STARTED

org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.WindowStoreBuilder@1dd6aa17, 
timestamped = false, caching = false, logging = false] PASSED

org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.WindowStoreBuilder@5b5e7285, 
timestamped = false, caching = false, logging = false] STARTED

org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.WindowStoreBuilder@5b5e7285, 
timestamped = false, caching = false, logging = false] PASSED

org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.SessionStoreBuilder@7d9bab34, 
timestamped = false, caching = true, logging = true] STARTED

org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.SessionStoreBuilder@7d9bab34, 
timestamped = false, caching = true, logging = true] PASSED

org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.SessionStoreBuilder@54bdc04c, 
timestamped = false, caching = true, logging = true] STARTED

org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.SessionStoreBuilder@54bdc04c, 
timestamped = false, caching = true, logging = true] PASSED

org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.SessionStoreBuilder@7c934ede, 
timestamped = false, caching = true, logging = false] STARTED

org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.SessionStoreBuilder@7c934ede, 
timestamped = false, caching = true, logging = false] PASSED

org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.SessionStoreBuilder@5471d193, 
timestamped = false, caching = true, logging = false] STARTED

org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.SessionStoreBuilder@5471d193, 
timestamped = false, caching = true, logging = false] PASSED

org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.SessionStoreBuilder@74931222, 
timestamped = false, caching = false, logging = true] STARTED

org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.SessionStoreBuilder@74931222, 
timestamped = false, caching = false, logging = true] PASSED

org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.SessionStoreBuilder@4e16035c, 
timestamped = false, caching = false, logging = true] STARTED