Build failed in Jenkins: kafka-trunk-jdk8 #981

2016-10-14 Thread Apache Jenkins Server
See 

Changes:

[jason] KAFKA-4303; Ensure commitSync does not block unnecessarily in poll

--
[...truncated 14189 lines...]
org.apache.kafka.streams.kstream.internals.KStreamImplTest > 
shouldCantHaveNullPredicate PASSED

org.apache.kafka.streams.kstream.internals.KStreamImplTest > 
shouldNotAllowNullActionOnForEach STARTED

org.apache.kafka.streams.kstream.internals.KStreamImplTest > 
shouldNotAllowNullActionOnForEach PASSED

org.apache.kafka.streams.kstream.internals.KStreamImplTest > 
shouldNotAllowNullValueMapperOnTableJoin STARTED

org.apache.kafka.streams.kstream.internals.KStreamImplTest > 
shouldNotAllowNullValueMapperOnTableJoin PASSED

org.apache.kafka.streams.kstream.internals.KStreamImplTest > 
shouldNotAllowNullPredicateOnFilterNot STARTED

org.apache.kafka.streams.kstream.internals.KStreamImplTest > 
shouldNotAllowNullPredicateOnFilterNot PASSED

org.apache.kafka.streams.kstream.internals.KStreamImplTest > 
shouldHaveAtLeastOnPredicateWhenBranching STARTED

org.apache.kafka.streams.kstream.internals.KStreamImplTest > 
shouldHaveAtLeastOnPredicateWhenBranching PASSED

org.apache.kafka.streams.kstream.internals.KStreamImplTest > 
shouldNotAllowNullFilePathOnWriteAsText STARTED

org.apache.kafka.streams.kstream.internals.KStreamImplTest > 
shouldNotAllowNullFilePathOnWriteAsText PASSED

org.apache.kafka.streams.kstream.internals.KStreamTransformValuesTest > 
testTransform STARTED

org.apache.kafka.streams.kstream.internals.KStreamTransformValuesTest > 
testTransform PASSED

org.apache.kafka.streams.kstream.internals.KGroupedStreamImplTest > 
shouldNotHaveNullReducerOnReduce STARTED

org.apache.kafka.streams.kstream.internals.KGroupedStreamImplTest > 
shouldNotHaveNullReducerOnReduce PASSED

org.apache.kafka.streams.kstream.internals.KGroupedStreamImplTest > 
shouldNotHaveNullStoreNameOnReduce STARTED

org.apache.kafka.streams.kstream.internals.KGroupedStreamImplTest > 
shouldNotHaveNullStoreNameOnReduce PASSED

org.apache.kafka.streams.kstream.internals.KGroupedStreamImplTest > 
shouldNotHaveNullAdderOnWindowedAggregate STARTED

org.apache.kafka.streams.kstream.internals.KGroupedStreamImplTest > 
shouldNotHaveNullAdderOnWindowedAggregate PASSED

org.apache.kafka.streams.kstream.internals.KGroupedStreamImplTest > 
shouldNotHaveNullInitializerOnWindowedAggregate STARTED

org.apache.kafka.streams.kstream.internals.KGroupedStreamImplTest > 
shouldNotHaveNullInitializerOnWindowedAggregate PASSED

org.apache.kafka.streams.kstream.internals.KGroupedStreamImplTest > 
shouldNotHaveNullReducerWithWindowedReduce STARTED

org.apache.kafka.streams.kstream.internals.KGroupedStreamImplTest > 
shouldNotHaveNullReducerWithWindowedReduce PASSED

org.apache.kafka.streams.kstream.internals.KGroupedStreamImplTest > 
shouldNotHaveNullStoreNameOnAggregate STARTED

org.apache.kafka.streams.kstream.internals.KGroupedStreamImplTest > 
shouldNotHaveNullStoreNameOnAggregate PASSED

org.apache.kafka.streams.kstream.internals.KGroupedStreamImplTest > 
shouldNotHaveNullAdderOnAggregate STARTED

org.apache.kafka.streams.kstream.internals.KGroupedStreamImplTest > 
shouldNotHaveNullAdderOnAggregate PASSED

org.apache.kafka.streams.kstream.internals.KGroupedStreamImplTest > 
shouldNotHaveNullWindowsWithWindowedReduce STARTED

org.apache.kafka.streams.kstream.internals.KGroupedStreamImplTest > 
shouldNotHaveNullWindowsWithWindowedReduce PASSED

org.apache.kafka.streams.kstream.internals.KGroupedStreamImplTest > 
shouldNotHaveNullWindowsOnWindowedAggregate STARTED

org.apache.kafka.streams.kstream.internals.KGroupedStreamImplTest > 
shouldNotHaveNullWindowsOnWindowedAggregate PASSED

org.apache.kafka.streams.kstream.internals.KGroupedStreamImplTest > 
shouldNotHaveNullStoreNameOnWindowedAggregate STARTED

org.apache.kafka.streams.kstream.internals.KGroupedStreamImplTest > 
shouldNotHaveNullStoreNameOnWindowedAggregate PASSED

org.apache.kafka.streams.kstream.internals.KGroupedStreamImplTest > 
shouldNotHaveNullStoreNameWithWindowedReduce STARTED

org.apache.kafka.streams.kstream.internals.KGroupedStreamImplTest > 
shouldNotHaveNullStoreNameWithWindowedReduce PASSED

org.apache.kafka.streams.kstream.internals.KGroupedStreamImplTest > 
shouldNotHaveNullInitializerOnAggregate STARTED

org.apache.kafka.streams.kstream.internals.KGroupedStreamImplTest > 
shouldNotHaveNullInitializerOnAggregate PASSED

org.apache.kafka.streams.kstream.internals.KStreamKStreamJoinTest > 
testOuterJoin STARTED

org.apache.kafka.streams.kstream.internals.KStreamKStreamJoinTest > 
testOuterJoin PASSED

org.apache.kafka.streams.kstream.internals.KStreamKStreamJoinTest > testJoin 
STARTED

org.apache.kafka.streams.kstream.internals.KStreamKStreamJoinTest > testJoin 
PASSED

org.apache.kafka.streams.kstream.internals.KStreamKStreamJoinTest > 
testWindowing STARTED

org.apache.kafka.streams.kstream.internals.KStreamKStreamJoinTest > 
testWindowing PASSED


Jenkins build is back to normal : kafka-0.10.1-jdk7 #74

2016-10-14 Thread Apache Jenkins Server
See 



Build failed in Jenkins: kafka-trunk-jdk7 #1633

2016-10-14 Thread Apache Jenkins Server
See 

Changes:

[jason] KAFKA-4303; Ensure commitSync does not block unnecessarily in poll

--
[...truncated 1609 lines...]
kafka.api.AuthorizerIntegrationTest > testConsumeWithNoAccess STARTED

kafka.api.AuthorizerIntegrationTest > testConsumeWithNoAccess PASSED

kafka.api.AuthorizerIntegrationTest > testOffsetFetchWithTopicAndGroupRead 
STARTED

kafka.api.AuthorizerIntegrationTest > testOffsetFetchWithTopicAndGroupRead 
PASSED

kafka.api.AuthorizerIntegrationTest > testCommitWithTopicDescribe STARTED

kafka.api.AuthorizerIntegrationTest > testCommitWithTopicDescribe PASSED

kafka.api.AuthorizerIntegrationTest > testAuthorizationWithTopicExisting STARTED

kafka.api.AuthorizerIntegrationTest > testAuthorizationWithTopicExisting PASSED

kafka.api.AuthorizerIntegrationTest > testProduceWithTopicDescribe STARTED

kafka.api.AuthorizerIntegrationTest > testProduceWithTopicDescribe PASSED

kafka.api.AuthorizerIntegrationTest > 
testPatternSubscriptionMatchingInternalTopic STARTED

kafka.api.AuthorizerIntegrationTest > 
testPatternSubscriptionMatchingInternalTopic PASSED

kafka.api.AuthorizerIntegrationTest > testOffsetFetchTopicDescribe STARTED

kafka.api.AuthorizerIntegrationTest > testOffsetFetchTopicDescribe PASSED

kafka.api.AuthorizerIntegrationTest > testCommitWithTopicAndGroupRead STARTED

kafka.api.AuthorizerIntegrationTest > testCommitWithTopicAndGroupRead PASSED

kafka.api.AuthorizerIntegrationTest > 
testSimpleConsumeWithExplicitSeekAndNoGroupAccess STARTED

kafka.api.AuthorizerIntegrationTest > 
testSimpleConsumeWithExplicitSeekAndNoGroupAccess PASSED

kafka.api.SaslMultiMechanismConsumerTest > testMultipleBrokerMechanisms STARTED

kafka.api.SaslMultiMechanismConsumerTest > testMultipleBrokerMechanisms PASSED

kafka.api.SaslMultiMechanismConsumerTest > testCoordinatorFailover STARTED

kafka.api.SaslMultiMechanismConsumerTest > testCoordinatorFailover PASSED

kafka.api.SaslMultiMechanismConsumerTest > testSimpleConsumption STARTED

kafka.api.SaslMultiMechanismConsumerTest > testSimpleConsumption PASSED

kafka.api.PlaintextConsumerTest > testEarliestOrLatestOffsets STARTED

kafka.api.PlaintextConsumerTest > testEarliestOrLatestOffsets PASSED

kafka.api.PlaintextConsumerTest > testPartitionsForAutoCreate STARTED

kafka.api.PlaintextConsumerTest > testPartitionsForAutoCreate PASSED

kafka.api.PlaintextConsumerTest > testShrinkingTopicSubscriptions STARTED

kafka.api.PlaintextConsumerTest > testShrinkingTopicSubscriptions PASSED

kafka.api.PlaintextConsumerTest > testMaxPollIntervalMs STARTED

kafka.api.PlaintextConsumerTest > testMaxPollIntervalMs PASSED

kafka.api.PlaintextConsumerTest > testOffsetsForTimes STARTED

kafka.api.PlaintextConsumerTest > testOffsetsForTimes PASSED

kafka.api.PlaintextConsumerTest > testSubsequentPatternSubscription STARTED

kafka.api.PlaintextConsumerTest > testSubsequentPatternSubscription PASSED

kafka.api.PlaintextConsumerTest > testAsyncCommit STARTED

kafka.api.PlaintextConsumerTest > testAsyncCommit PASSED

kafka.api.PlaintextConsumerTest > testLowMaxFetchSizeForRequestAndPartition 
STARTED

kafka.api.PlaintextConsumerTest > testLowMaxFetchSizeForRequestAndPartition 
PASSED

kafka.api.PlaintextConsumerTest > testMultiConsumerSessionTimeoutOnStopPolling 
STARTED

kafka.api.PlaintextConsumerTest > testMultiConsumerSessionTimeoutOnStopPolling 
PASSED

kafka.api.PlaintextConsumerTest > testMaxPollIntervalMsDelayInRevocation STARTED

kafka.api.PlaintextConsumerTest > testMaxPollIntervalMsDelayInRevocation PASSED

kafka.api.PlaintextConsumerTest > testPartitionsForInvalidTopic STARTED

kafka.api.PlaintextConsumerTest > testPartitionsForInvalidTopic PASSED

kafka.api.PlaintextConsumerTest > testPauseStateNotPreservedByRebalance STARTED

kafka.api.PlaintextConsumerTest > testPauseStateNotPreservedByRebalance PASSED

kafka.api.PlaintextConsumerTest > 
testFetchHonoursFetchSizeIfLargeRecordNotFirst STARTED

kafka.api.PlaintextConsumerTest > 
testFetchHonoursFetchSizeIfLargeRecordNotFirst PASSED

kafka.api.PlaintextConsumerTest > testSeek STARTED

kafka.api.PlaintextConsumerTest > testSeek PASSED

kafka.api.PlaintextConsumerTest > testPositionAndCommit STARTED

kafka.api.PlaintextConsumerTest > testPositionAndCommit PASSED

kafka.api.PlaintextConsumerTest > 
testFetchRecordLargerThanMaxPartitionFetchBytes STARTED

kafka.api.PlaintextConsumerTest > 
testFetchRecordLargerThanMaxPartitionFetchBytes PASSED

kafka.api.PlaintextConsumerTest > testUnsubscribeTopic STARTED

kafka.api.PlaintextConsumerTest > testUnsubscribeTopic PASSED

kafka.api.PlaintextConsumerTest > testMultiConsumerSessionTimeoutOnClose STARTED

kafka.api.PlaintextConsumerTest > testMultiConsumerSessionTimeoutOnClose PASSED

kafka.api.PlaintextConsumerTest > testFetchRecordLargerThanFetchMaxBytes STARTED

kafka.api.PlaintextConsumerTest > testFetchRecordLargerThanFetchMaxBytes PASSED

kafka.api.PlaintextConsumerTest 

[VOTE] 0.10.1.0 RC3

2016-10-14 Thread Jason Gustafson
Hello Kafka users, developers and client-developers,

One more RC for 0.10.1.0. We're hoping this is the final one so that we can
meet the release target date of Oct. 17 (Monday). Please let me know as
soon as possible if you find any major problems.

Release plan: https://cwiki.apache.org/confluence/display/KAFKA/Release+Plan+0.10.1.

Release notes for the 0.10.1.0 release:
http://home.apache.org/~jgus/kafka-0.10.1.0-rc3/RELEASE_NOTES.html

*** Please download, test and vote by Monday, Oct 17, 5pm PT

Kafka's KEYS file containing PGP keys we use to sign the release:
http://kafka.apache.org/KEYS

* Release artifacts to be voted upon (source and binary):
http://home.apache.org/~jgus/kafka-0.10.1.0-rc3/

* Maven artifacts to be voted upon:
https://repository.apache.org/content/groups/staging/

* Javadoc:
http://home.apache.org/~jgus/kafka-0.10.1.0-rc3/javadoc/

* Tag to be voted upon (off 0.10.1 branch) is the 0.10.1.0-rc3 tag:
https://git-wip-us.apache.org/repos/asf?p=kafka.git;a=tag;h=50f30a44f31fca1bd9189d2814388d51bd56b06b

* Documentation:
http://kafka.apache.org/0101/documentation.html

* Protocol:
http://kafka.apache.org/0101/protocol.html

* Tests:
Unit tests: https://builds.apache.org/job/kafka-0.10.1-jdk7/71/
System tests:
http://testing.confluent.io/confluent-kafka-0-10-1-system-test-results/?prefix=2016-10-13--001.1476369986--apache--0.10.1--ee212d1/

(Note that these tests do not include a couple patches merged today. I will
send links to updated test builds as soon as they are available)

Thanks,

Jason


[GitHub] kafka pull request #2031: KAFKA-4303: Ensure commitSync does not block unnec...

2016-10-14 Thread asfgit
Github user asfgit closed the pull request at:

https://github.com/apache/kafka/pull/2031


---
If your project is set up for it, you can reply to this email and have your
reply appear on GitHub as well. If your project does not have this feature
enabled and wishes so, or if the feature is enabled but not working, please
contact infrastructure at infrastruct...@apache.org or file a JIRA ticket
with INFRA.
---


[jira] [Commented] (KAFKA-4303) KafkaConsumer blocks unnecessarily in commitSync()

2016-10-14 Thread Jason Gustafson (JIRA)

[ 
https://issues.apache.org/jira/browse/KAFKA-4303?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel=15576520#comment-15576520
 ] 

Jason Gustafson commented on KAFKA-4303:


Revised the title. The unnecessary blocking caused by the issue above should be 
bounded in duration by the minimum of {{metadata.max.age.ms}} and 
{{request.timeout.ms}} (this upper bound is enforced by {{NetworkClient}}). We 
still probably need to fix this though because it prevents the offset commit 
from being sent and results in the consumer being kicked out of the group.

> KafkaConsumer blocks unnecessarily in commitSync() 
> ---
>
> Key: KAFKA-4303
> URL: https://issues.apache.org/jira/browse/KAFKA-4303
> Project: Kafka
>  Issue Type: Bug
>  Components: consumer
>Reporter: Jason Gustafson
>Assignee: Jason Gustafson
>Priority: Blocker
> Fix For: 0.10.1.0
>
>
> This appears to be a regression caused by the KIP-62 patch. It is possible 
> that we end up blocking indefinitely in {{NetworkClient.poll()}} with no 
> requests pending. The reason is that 
> {{ConsumerNetworkClient.poll(RequestFuture)}} does not actually verify that 
> the request has been sent prior to calling {{NetworkClient.poll()}}. This was 
> not possible previously because the maximum timeout was always bounded by the 
> heartbeat interval.
> This appears to be the cause of hanging builds that some people may have 
> experienced. In particular, I have seen {{ConsumerBounceTest}} hang because 
> of this problem.
> Note that another reason we can block indefinitely in {{commitSync()}} is 
> coordinator discovery. We do not attempt to fix this here.



--
This message was sent by Atlassian JIRA
(v6.3.4#6332)


[jira] [Updated] (KAFKA-4303) KafkaConsumer blocks unnecessarily in commitSync()

2016-10-14 Thread Jason Gustafson (JIRA)

 [ 
https://issues.apache.org/jira/browse/KAFKA-4303?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel
 ]

Jason Gustafson updated KAFKA-4303:
---
Description: 
This appears to be a regression caused by the KIP-62 patch. It is possible that 
we end up blocking unnecessarily in {{NetworkClient.poll()}} with no requests 
pending. The reason is that {{ConsumerNetworkClient.poll(RequestFuture)}} does 
not actually verify that the request has been sent prior to calling 
{{NetworkClient.poll()}}. This was not as much of a problem previously because 
the maximum timeout was always bounded by the heartbeat interval, which is 
typically small.

This appears to be the cause of hanging builds that some people may have 
experienced. In particular, I have seen {{ConsumerBounceTest}} hang because of 
this problem.

Note that another reason we can block indefinitely in {{commitSync()}} is 
coordinator discovery. We do not attempt to fix this here.

  was:
This appears to be a regression caused by the KIP-62 patch. It is possible that 
we end up blocking indefinitely in {{NetworkClient.poll()}} with no requests 
pending. The reason is that {{ConsumerNetworkClient.poll(RequestFuture)}} does 
not actually verify that the request has been sent prior to calling 
{{NetworkClient.poll()}}. This was not possible previously because the maximum 
timeout was always bounded by the heartbeat interval.

This appears to be the cause of hanging builds that some people may have 
experienced. In particular, I have seen {{ConsumerBounceTest}} hang because of 
this problem.

Note that another reason we can block indefinitely in {{commitSync()}} is 
coordinator discovery. We do not attempt to fix this here.


> KafkaConsumer blocks unnecessarily in commitSync() 
> ---
>
> Key: KAFKA-4303
> URL: https://issues.apache.org/jira/browse/KAFKA-4303
> Project: Kafka
>  Issue Type: Bug
>  Components: consumer
>Reporter: Jason Gustafson
>Assignee: Jason Gustafson
>Priority: Blocker
> Fix For: 0.10.1.0
>
>
> This appears to be a regression caused by the KIP-62 patch. It is possible 
> that we end up blocking unnecessarily in {{NetworkClient.poll()}} with no 
> requests pending. The reason is that 
> {{ConsumerNetworkClient.poll(RequestFuture)}} does not actually verify that 
> the request has been sent prior to calling {{NetworkClient.poll()}}. This was 
> not as much of a problem previously because the maximum timeout was always 
> bounded by the heartbeat interval, which is typically small.
> This appears to be the cause of hanging builds that some people may have 
> experienced. In particular, I have seen {{ConsumerBounceTest}} hang because 
> of this problem.
> Note that another reason we can block indefinitely in {{commitSync()}} is 
> coordinator discovery. We do not attempt to fix this here.



--
This message was sent by Atlassian JIRA
(v6.3.4#6332)


[jira] [Updated] (KAFKA-4303) KafkaConsumer blocks unnecessarily in commitSync()

2016-10-14 Thread Jason Gustafson (JIRA)

 [ 
https://issues.apache.org/jira/browse/KAFKA-4303?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel
 ]

Jason Gustafson updated KAFKA-4303:
---
Summary: KafkaConsumer blocks unnecessarily in commitSync()   (was: 
KafkaConsumer hangs indefinitely in commitSync() )

> KafkaConsumer blocks unnecessarily in commitSync() 
> ---
>
> Key: KAFKA-4303
> URL: https://issues.apache.org/jira/browse/KAFKA-4303
> Project: Kafka
>  Issue Type: Bug
>  Components: consumer
>Reporter: Jason Gustafson
>Assignee: Jason Gustafson
>Priority: Blocker
> Fix For: 0.10.1.0
>
>
> This appears to be a regression caused by the KIP-62 patch. It is possible 
> that we end up blocking indefinitely in {{NetworkClient.poll()}} with no 
> requests pending. The reason is that 
> {{ConsumerNetworkClient.poll(RequestFuture)}} does not actually verify that 
> the request has been sent prior to calling {{NetworkClient.poll()}}. This was 
> not possible previously because the maximum timeout was always bounded by the 
> heartbeat interval.
> This appears to be the cause of hanging builds that some people may have 
> experienced. In particular, I have seen {{ConsumerBounceTest}} hang because 
> of this problem.
> Note that another reason we can block indefinitely in {{commitSync()}} is 
> coordinator discovery. We do not attempt to fix this here.



--
This message was sent by Atlassian JIRA
(v6.3.4#6332)


[GitHub] kafka pull request #2026: MINOR: Improve on Streams log4j

2016-10-14 Thread asfgit
Github user asfgit closed the pull request at:

https://github.com/apache/kafka/pull/2026


---
If your project is set up for it, you can reply to this email and have your
reply appear on GitHub as well. If your project does not have this feature
enabled and wishes so, or if the feature is enabled but not working, please
contact infrastructure at infrastruct...@apache.org or file a JIRA ticket
with INFRA.
---


Build failed in Jenkins: kafka-0.10.1-jdk7 #73

2016-10-14 Thread Apache Jenkins Server
See 

Changes:

[jason] MINOR: Some images should be centered in the documentation

--
[...truncated 6449 lines...]
kafka.api.AuthorizerIntegrationTest > testCommitWithTopicAndGroupRead STARTED

kafka.api.AuthorizerIntegrationTest > testCommitWithTopicAndGroupRead PASSED

kafka.api.AuthorizerIntegrationTest > 
testSimpleConsumeWithExplicitSeekAndNoGroupAccess STARTED

kafka.api.AuthorizerIntegrationTest > 
testSimpleConsumeWithExplicitSeekAndNoGroupAccess PASSED

kafka.api.AdminClientTest > testDescribeGroup STARTED

kafka.api.AdminClientTest > testDescribeGroup PASSED

kafka.api.AdminClientTest > testDescribeConsumerGroup STARTED

kafka.api.AdminClientTest > testDescribeConsumerGroup PASSED

kafka.api.AdminClientTest > testListGroups STARTED

kafka.api.AdminClientTest > testListGroups PASSED

kafka.api.AdminClientTest > testDescribeConsumerGroupForNonExistentGroup STARTED

kafka.api.AdminClientTest > testDescribeConsumerGroupForNonExistentGroup PASSED

kafka.api.ProducerBounceTest > testBrokerFailure STARTED

kafka.api.ProducerBounceTest > testBrokerFailure PASSED

kafka.api.ClientIdQuotaTest > testProducerConsumerOverrideUnthrottled STARTED

kafka.api.ClientIdQuotaTest > testProducerConsumerOverrideUnthrottled PASSED

kafka.api.ClientIdQuotaTest > testThrottledProducerConsumer STARTED

kafka.api.ClientIdQuotaTest > testThrottledProducerConsumer PASSED

kafka.api.ClientIdQuotaTest > testQuotaOverrideDelete STARTED

kafka.api.ClientIdQuotaTest > testQuotaOverrideDelete PASSED

kafka.api.test.ProducerCompressionTest > testCompression[0] STARTED

kafka.api.test.ProducerCompressionTest > testCompression[0] PASSED

kafka.api.test.ProducerCompressionTest > testCompression[1] STARTED

kafka.api.test.ProducerCompressionTest > testCompression[1] PASSED

kafka.api.test.ProducerCompressionTest > testCompression[2] STARTED

kafka.api.test.ProducerCompressionTest > testCompression[2] PASSED

kafka.api.test.ProducerCompressionTest > testCompression[3] STARTED

kafka.api.test.ProducerCompressionTest > testCompression[3] PASSED

kafka.api.SaslPlainSslEndToEndAuthorizationTest > 
testNoConsumeWithoutDescribeAclViaSubscribe STARTED

kafka.api.SaslPlainSslEndToEndAuthorizationTest > 
testNoConsumeWithoutDescribeAclViaSubscribe PASSED

kafka.api.SaslPlainSslEndToEndAuthorizationTest > testProduceConsumeViaAssign 
STARTED

kafka.api.SaslPlainSslEndToEndAuthorizationTest > testProduceConsumeViaAssign 
PASSED

kafka.api.SaslPlainSslEndToEndAuthorizationTest > 
testNoConsumeWithDescribeAclViaAssign STARTED

kafka.api.SaslPlainSslEndToEndAuthorizationTest > 
testNoConsumeWithDescribeAclViaAssign PASSED

kafka.api.SaslPlainSslEndToEndAuthorizationTest > 
testNoConsumeWithDescribeAclViaSubscribe STARTED

kafka.api.SaslPlainSslEndToEndAuthorizationTest > 
testNoConsumeWithDescribeAclViaSubscribe PASSED

kafka.api.SaslPlainSslEndToEndAuthorizationTest > 
testNoConsumeWithoutDescribeAclViaAssign STARTED

kafka.api.SaslPlainSslEndToEndAuthorizationTest > 
testNoConsumeWithoutDescribeAclViaAssign PASSED

kafka.api.SaslPlainSslEndToEndAuthorizationTest > testNoGroupAcl STARTED

kafka.api.SaslPlainSslEndToEndAuthorizationTest > testNoGroupAcl PASSED

kafka.api.SaslPlainSslEndToEndAuthorizationTest > testNoProduceWithDescribeAcl 
STARTED

kafka.api.SaslPlainSslEndToEndAuthorizationTest > testNoProduceWithDescribeAcl 
PASSED

kafka.api.SaslPlainSslEndToEndAuthorizationTest > 
testProduceConsumeViaSubscribe STARTED

kafka.api.SaslPlainSslEndToEndAuthorizationTest > 
testProduceConsumeViaSubscribe PASSED

kafka.api.SaslPlainSslEndToEndAuthorizationTest > 
testNoProduceWithoutDescribeAcl STARTED

kafka.api.SaslPlainSslEndToEndAuthorizationTest > 
testNoProduceWithoutDescribeAcl PASSED

kafka.api.ConsumerBounceTest > testSeekAndCommitWithBrokerFailures STARTED

kafka.api.ConsumerBounceTest > testSeekAndCommitWithBrokerFailures PASSED

kafka.api.ConsumerBounceTest > testConsumptionWithBrokerFailures STARTED

kafka.api.ConsumerBounceTest > testConsumptionWithBrokerFailures PASSED

kafka.api.SaslPlaintextConsumerTest > testCoordinatorFailover STARTED

kafka.api.SaslPlaintextConsumerTest > testCoordinatorFailover PASSED

kafka.api.SaslPlaintextConsumerTest > testSimpleConsumption STARTED

kafka.api.SaslPlaintextConsumerTest > testSimpleConsumption PASSED

kafka.api.SaslMultiMechanismConsumerTest > testMultipleBrokerMechanisms STARTED

kafka.api.SaslMultiMechanismConsumerTest > testMultipleBrokerMechanisms PASSED

kafka.api.SaslMultiMechanismConsumerTest > testCoordinatorFailover STARTED

kafka.api.SaslMultiMechanismConsumerTest > testCoordinatorFailover PASSED

kafka.api.SaslMultiMechanismConsumerTest > testSimpleConsumption STARTED

kafka.api.SaslMultiMechanismConsumerTest > testSimpleConsumption PASSED

kafka.api.FetchRequestTest > testShuffleWithSingleTopic STARTED

kafka.api.FetchRequestTest > testShuffleWithSingleTopic PASSED


[jira] [Created] (KAFKA-4304) Extend Interactive Queries for return latest update timestamp per key

2016-10-14 Thread Matthias J. Sax (JIRA)
Matthias J. Sax created KAFKA-4304:
--

 Summary: Extend Interactive Queries for return latest update 
timestamp per key
 Key: KAFKA-4304
 URL: https://issues.apache.org/jira/browse/KAFKA-4304
 Project: Kafka
  Issue Type: Improvement
  Components: streams
Reporter: Matthias J. Sax
Assignee: Guozhang Wang
Priority: Minor


Currently, when querying state store, it is not clear when the key was updated 
last. The idea of this JIRA is to make the latest update timestamp for each 
key-value-pair of the state store accessible.

For example, this might be useful to
 * check if a value was updated but did not change (just compare the update TS)
 * if you want to consider only recently updated keys



--
This message was sent by Atlassian JIRA
(v6.3.4#6332)


[jira] [Commented] (KAFKA-4303) KafkaConsumer hangs indefinitely in commitSync()

2016-10-14 Thread ASF GitHub Bot (JIRA)

[ 
https://issues.apache.org/jira/browse/KAFKA-4303?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel=15576402#comment-15576402
 ] 

ASF GitHub Bot commented on KAFKA-4303:
---

GitHub user hachikuji opened a pull request:

https://github.com/apache/kafka/pull/2031

KAFKA-4303: Ensure commitSync does not block indefinitely in poll without 
in-flight requests



You can merge this pull request into a Git repository by running:

$ git pull https://github.com/hachikuji/kafka KAFKA-4303

Alternatively you can review and apply these changes as the patch at:

https://github.com/apache/kafka/pull/2031.patch

To close this pull request, make a commit to your master/trunk branch
with (at least) the following in the commit message:

This closes #2031


commit ccba1a4ee7626230d9a6aed1e966d54d6e509c26
Author: Jason Gustafson 
Date:   2016-10-14T20:29:06Z

KAFKA-4303: Ensure commitSync does not block indefinitely in poll without 
in-flight requests




> KafkaConsumer hangs indefinitely in commitSync() 
> -
>
> Key: KAFKA-4303
> URL: https://issues.apache.org/jira/browse/KAFKA-4303
> Project: Kafka
>  Issue Type: Bug
>  Components: consumer
>Reporter: Jason Gustafson
>Assignee: Jason Gustafson
>Priority: Blocker
> Fix For: 0.10.1.0
>
>
> This appears to be a regression caused by the KIP-62 patch. It is possible 
> that we end up blocking indefinitely in {{NetworkClient.poll()}} with no 
> requests pending. The reason is that 
> {{ConsumerNetworkClient.poll(RequestFuture)}} does not actually verify that 
> the request has been sent prior to calling {{NetworkClient.poll()}}. This was 
> not possible previously because the maximum timeout was always bounded by the 
> heartbeat interval.
> This appears to be the cause of hanging builds that some people may have 
> experienced. In particular, I have seen {{ConsumerBounceTest}} hang because 
> of this problem.
> Note that another reason we can block indefinitely in {{commitSync()}} is 
> coordinator discovery. We do not attempt to fix this here.



--
This message was sent by Atlassian JIRA
(v6.3.4#6332)


[GitHub] kafka pull request #2031: KAFKA-4303: Ensure commitSync does not block indef...

2016-10-14 Thread hachikuji
GitHub user hachikuji opened a pull request:

https://github.com/apache/kafka/pull/2031

KAFKA-4303: Ensure commitSync does not block indefinitely in poll without 
in-flight requests



You can merge this pull request into a Git repository by running:

$ git pull https://github.com/hachikuji/kafka KAFKA-4303

Alternatively you can review and apply these changes as the patch at:

https://github.com/apache/kafka/pull/2031.patch

To close this pull request, make a commit to your master/trunk branch
with (at least) the following in the commit message:

This closes #2031


commit ccba1a4ee7626230d9a6aed1e966d54d6e509c26
Author: Jason Gustafson 
Date:   2016-10-14T20:29:06Z

KAFKA-4303: Ensure commitSync does not block indefinitely in poll without 
in-flight requests




---
If your project is set up for it, you can reply to this email and have your
reply appear on GitHub as well. If your project does not have this feature
enabled and wishes so, or if the feature is enabled but not working, please
contact infrastructure at infrastruct...@apache.org or file a JIRA ticket
with INFRA.
---


Re: [VOTE] 0.10.1.0 RC2

2016-10-14 Thread Jason Gustafson
Hey Tom, thanks a lot for doing the performance testing!

-Jason

On Fri, Oct 14, 2016 at 8:04 AM, Tom Crayford  wrote:

> Heroku has tested this using the same performance testing setup we used to
> evaluate the impact of 0.9 -> 0.10 (see https://
> engineering.heroku.com/blogs/2016-05-27-apache-kafka-010-
> evaluating-performance-in-distributed-systems/).
>
> We see no issues at all with them on RC2.
>
> On Thu, Oct 13, 2016 at 11:04 PM, Jason Gustafson 
> wrote:
>
> > Thanks Vahid, I'll see if I can reproduce the problem you're seeing on
> Step
> > 6 of the quickstart.
> >
> > -Jason
> >
> > On Thu, Oct 13, 2016 at 1:04 PM, Vahid S Hashemian <
> > vahidhashem...@us.ibm.com> wrote:
> >
> > > Hi Jason,
> > >
> > > I tested the quickstart again with the new RC on Ubuntu Linux, Windows,
> > > and Mac.
> > >
> > > There are two more issues I noticed:
> > > I have not been able to run the Streams example on my Windows
> > environments
> > > (when I build from the source, and also when I use the built version)
> due
> > > to some errors that I'm getting, but I suspect there are issues with my
> > > environments.
> > > When I go through the Step 6 (multi-broker example) when the leader is
> > > broker 0 and I shut it down when I run the new consumer no records are
> > > returned (I make sure I use the port of a live broker for
> > bootstrap-server
> > > argument). As soon as I restart the stopped broker (0), records are
> > > returned. If the leader is broker 1 or 2 I don't run into this issue.
> If
> > I
> > > use the old consumer I don't run into the issue either. I have been
> able
> > > to reproduce this consistently on all three OS's above.
> > >
> > > --Vahid
> > >
> > >
> > >
> > > From:   Jason Gustafson 
> > > To: dev@kafka.apache.org, Kafka Users ,
> > > kafka-clients 
> > > Date:   10/12/2016 10:41 AM
> > > Subject:[VOTE] 0.10.1.0 RC2
> > >
> > >
> > >
> > > Hello Kafka users, developers and client-developers,
> > >
> > > One more RC for 0.10.1.0. I think we're getting close!
> > >
> > > Release plan: https://cwiki.apache.org/confluence/display/KAFKA/Rele
> > > ase+Plan+0.10.1.
> > >
> > > Release notes for the 0.10.1.0 release:
> > > http://home.apache.org/~jgus/kafka-0.10.1.0-rc2/RELEASE_NOTES.html
> > >
> > > *** Please download, test and vote by Saturday, Oct 15, 11am PT
> > >
> > > Kafka's KEYS file containing PGP keys we use to sign the release:
> > > http://kafka.apache.org/KEYS
> > >
> > > * Release artifacts to be voted upon (source and binary):
> > > http://home.apache.org/~jgus/kafka-0.10.1.0-rc2/
> > >
> > > * Maven artifacts to be voted upon:
> > > https://repository.apache.org/content/groups/staging/
> > >
> > > * Javadoc:
> > > http://home.apache.org/~jgus/kafka-0.10.1.0-rc2/javadoc/
> > >
> > > * Tag to be voted upon (off 0.10.1 branch) is the 0.10.1.0-rc2 tag:
> > > https://git-wip-us.apache.org/repos/asf?p=kafka.git;a=tag;h=
> > > 8702d66434b86092a3738472f9186d6845ab0720
> > >
> > > * Documentation:
> > > http://kafka.apache.org/0101/documentation.html
> > >
> > > * Protocol:
> > > http://kafka.apache.org/0101/protocol.html
> > >
> > > * Tests:
> > > Unit tests: https://builds.apache.org/job/kafka-0.10.1-jdk7/68/
> > > System tests: http://confluent-kafka-0-10-1-system-test-results.s3-
> > > us-west-2.amazonaws.com/2016-10-11--001.1476197348--apache-
> > > -0.10.1--d981dd2/
> > >
> > > Thanks,
> > >
> > > Jason
> > >
> > >
> > >
> > >
> > >
> >
>


Jenkins build is back to normal : kafka-trunk-jdk8 #979

2016-10-14 Thread Apache Jenkins Server
See 



[jira] [Created] (KAFKA-4303) KafkaConsumer hangs indefinitely in commitSync()

2016-10-14 Thread Jason Gustafson (JIRA)
Jason Gustafson created KAFKA-4303:
--

 Summary: KafkaConsumer hangs indefinitely in commitSync() 
 Key: KAFKA-4303
 URL: https://issues.apache.org/jira/browse/KAFKA-4303
 Project: Kafka
  Issue Type: Bug
  Components: consumer
Reporter: Jason Gustafson
Assignee: Jason Gustafson
Priority: Blocker
 Fix For: 0.10.1.0


This appears to be a regression caused by the KIP-62 patch. It is possible that 
we end up blocking indefinitely in {{NetworkClient.poll()}} with no requests 
pending. The reason is that {{ConsumerNetworkClient.poll(RequestFuture)}} does 
not actually verify that the request has been sent prior to calling 
{{NetworkClient.poll()}}. This was not possible previously because the maximum 
timeout was always bounded by the heartbeat interval.

This appears to be the cause of hanging builds that some people may have 
experienced. In particular, I have seen {{ConsumerBounceTest}} hang because of 
this problem.

Note that another reason we can block indefinitely in {{commitSync()}} is 
coordinator discovery. We do not attempt to fix this here.



--
This message was sent by Atlassian JIRA
(v6.3.4#6332)


[GitHub] kafka pull request #2028: Replace default X509TrustManager with ReloadableX5...

2016-10-14 Thread allenxiang
Github user allenxiang closed the pull request at:

https://github.com/apache/kafka/pull/2028


---
If your project is set up for it, you can reply to this email and have your
reply appear on GitHub as well. If your project does not have this feature
enabled and wishes so, or if the feature is enabled but not working, please
contact infrastructure at infrastruct...@apache.org or file a JIRA ticket
with INFRA.
---


[GitHub] kafka pull request #2030: MINOR: Added more basic concepts to the documentat...

2016-10-14 Thread enothereska
GitHub user enothereska opened a pull request:

https://github.com/apache/kafka/pull/2030

MINOR: Added more basic concepts to the documentation [WiP]



You can merge this pull request into a Git repository by running:

$ git pull https://github.com/enothereska/kafka minor-kip63-docs

Alternatively you can review and apply these changes as the patch at:

https://github.com/apache/kafka/pull/2030.patch

To close this pull request, make a commit to your master/trunk branch
with (at least) the following in the commit message:

This closes #2030


commit d6326282e2d7fb0276f65a75c0cb9c41414802f1
Author: Eno Thereska 
Date:   2016-10-14T17:06:03Z

Added basic concepts




---
If your project is set up for it, you can reply to this email and have your
reply appear on GitHub as well. If your project does not have this feature
enabled and wishes so, or if the feature is enabled but not working, please
contact infrastructure at infrastruct...@apache.org or file a JIRA ticket
with INFRA.
---


[GitHub] kafka pull request #2029: MINOR: Some images should be centered in the docum...

2016-10-14 Thread asfgit
Github user asfgit closed the pull request at:

https://github.com/apache/kafka/pull/2029


---
If your project is set up for it, you can reply to this email and have your
reply appear on GitHub as well. If your project does not have this feature
enabled and wishes so, or if the feature is enabled but not working, please
contact infrastructure at infrastruct...@apache.org or file a JIRA ticket
with INFRA.
---


[GitHub] kafka pull request #2029: MINOR: Some images should be centered in the docum...

2016-10-14 Thread hachikuji
GitHub user hachikuji opened a pull request:

https://github.com/apache/kafka/pull/2029

MINOR: Some images should be centered in the documentation



You can merge this pull request into a Git repository by running:

$ git pull https://github.com/hachikuji/kafka center-some-images

Alternatively you can review and apply these changes as the patch at:

https://github.com/apache/kafka/pull/2029.patch

To close this pull request, make a commit to your master/trunk branch
with (at least) the following in the commit message:

This closes #2029


commit 2663e28a0610ba2b968487ffdde51423fa3d8fb3
Author: Jason Gustafson 
Date:   2016-10-14T16:57:21Z

MINOR: Some images should be centered in the documentation




---
If your project is set up for it, you can reply to this email and have your
reply appear on GitHub as well. If your project does not have this feature
enabled and wishes so, or if the feature is enabled but not working, please
contact infrastructure at infrastruct...@apache.org or file a JIRA ticket
with INFRA.
---


[jira] [Commented] (KAFKA-4254) Questionable handling of unknown partitions in KafkaProducer

2016-10-14 Thread Jeff Widman (JIRA)

[ 
https://issues.apache.org/jira/browse/KAFKA-4254?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel=15575850#comment-15575850
 ] 

Jeff Widman commented on KAFKA-4254:


Does this affect 10.0 or just 10.1?

> Questionable handling of unknown partitions in KafkaProducer
> 
>
> Key: KAFKA-4254
> URL: https://issues.apache.org/jira/browse/KAFKA-4254
> Project: Kafka
>  Issue Type: Bug
>  Components: producer 
>Reporter: Jason Gustafson
>Assignee: Konstantine Karantasis
> Fix For: 0.10.1.0
>
>
> Currently the producer will raise an {{IllegalArgumentException}} if the user 
> attempts to write to a partition which has just been created. This is caused 
> by the fact that the producer does not attempt to refetch topic metadata in 
> this case, which means that its check for partition validity is based on 
> stale metadata.
> If the topic for the partition did not already exist, it works fine because 
> the producer will block until it has metadata for the topic, so this case is 
> primarily hit when the number of partitions is dynamically increased. 
> A couple options to fix this that come to mind:
> 1. We could treat unknown partitions just as we do unknown topics. If the 
> partition doesn't exist, we refetch metadata and try again (timing out when 
> max.block.ms is reached).
> 2. We can at least throw a more specific exception so that users can handle 
> the error. Raising {{IllegalArgumentException}} is not helpful in practice 
> because it can also be caused by other errors.
> My inclination is to do the first one since the producer seems incorrect to 
> tell the user that the partition is invalid.



--
This message was sent by Atlassian JIRA
(v6.3.4#6332)


Re: [VOTE] 0.10.1.0 RC2

2016-10-14 Thread Tom Crayford
Heroku has tested this using the same performance testing setup we used to
evaluate the impact of 0.9 -> 0.10 (see https://
engineering.heroku.com/blogs/2016-05-27-apache-kafka-010-
evaluating-performance-in-distributed-systems/).

We see no issues at all with them on RC2.

On Thu, Oct 13, 2016 at 11:04 PM, Jason Gustafson 
wrote:

> Thanks Vahid, I'll see if I can reproduce the problem you're seeing on Step
> 6 of the quickstart.
>
> -Jason
>
> On Thu, Oct 13, 2016 at 1:04 PM, Vahid S Hashemian <
> vahidhashem...@us.ibm.com> wrote:
>
> > Hi Jason,
> >
> > I tested the quickstart again with the new RC on Ubuntu Linux, Windows,
> > and Mac.
> >
> > There are two more issues I noticed:
> > I have not been able to run the Streams example on my Windows
> environments
> > (when I build from the source, and also when I use the built version) due
> > to some errors that I'm getting, but I suspect there are issues with my
> > environments.
> > When I go through Step 6 (multi-broker example) when the leader is
> > broker 0 and I shut it down when I run the new consumer no records are
> > returned (I make sure I use the port of a live broker for
> bootstrap-server
> > argument). As soon as I restart the stopped broker (0), records are
> > returned. If the leader is broker 1 or 2 I don't run into this issue. If
> I
> > use the old consumer I don't run into the issue either. I have been able
> > to reproduce this consistently on all three OS's above.
> >
> > --Vahid
> >
> >
> >
> > From:   Jason Gustafson 
> > To: dev@kafka.apache.org, Kafka Users ,
> > kafka-clients 
> > Date:   10/12/2016 10:41 AM
> > Subject:[VOTE] 0.10.1.0 RC2
> >
> >
> >
> > Hello Kafka users, developers and client-developers,
> >
> > One more RC for 0.10.1.0. I think we're getting close!
> >
> > Release plan: https://cwiki.apache.org/confluence/display/KAFKA/Rele
> > ase+Plan+0.10.1.
> >
> > Release notes for the 0.10.1.0 release:
> > http://home.apache.org/~jgus/kafka-0.10.1.0-rc2/RELEASE_NOTES.html
> >
> > *** Please download, test and vote by Saturday, Oct 15, 11am PT
> >
> > Kafka's KEYS file containing PGP keys we use to sign the release:
> > http://kafka.apache.org/KEYS
> >
> > * Release artifacts to be voted upon (source and binary):
> > http://home.apache.org/~jgus/kafka-0.10.1.0-rc2/
> >
> > * Maven artifacts to be voted upon:
> > https://repository.apache.org/content/groups/staging/
> >
> > * Javadoc:
> > http://home.apache.org/~jgus/kafka-0.10.1.0-rc2/javadoc/
> >
> > * Tag to be voted upon (off 0.10.1 branch) is the 0.10.1.0-rc2 tag:
> > https://git-wip-us.apache.org/repos/asf?p=kafka.git;a=tag;h=
> > 8702d66434b86092a3738472f9186d6845ab0720
> >
> > * Documentation:
> > http://kafka.apache.org/0101/documentation.html
> >
> > * Protocol:
> > http://kafka.apache.org/0101/protocol.html
> >
> > * Tests:
> > Unit tests: https://builds.apache.org/job/kafka-0.10.1-jdk7/68/
> > System tests: http://confluent-kafka-0-10-1-system-test-results.s3-
> > us-west-2.amazonaws.com/2016-10-11--001.1476197348--apache-
> > -0.10.1--d981dd2/
> >
> > Thanks,
> >
> > Jason
> >
> >
> >
> >
> >
>


Query on kafka queue message purge.

2016-10-14 Thread Rudra Moharana
Hi Team,

I need help with a question:
Is there any way to remove messages from a Kafka queue without stopping
ZooKeeper, the broker, or the cluster?

Thanks,
Rudra


[GitHub] kafka pull request #2028: Replace default X509TrustManager with ReloadableX5...

2016-10-14 Thread allenxiang
GitHub user allenxiang opened a pull request:

https://github.com/apache/kafka/pull/2028

Replace default X509TrustManager with ReloadableX509TrustManager.

ReloadableX509TrustManager will allow us to replace the broker truststore at any 
time without restarting the broker.

You can merge this pull request into a Git repository by running:

$ git pull https://github.com/allenxiang/kafka trunk

Alternatively you can review and apply these changes as the patch at:

https://github.com/apache/kafka/pull/2028.patch

To close this pull request, make a commit to your master/trunk branch
with (at least) the following in the commit message:

This closes #2028


commit 7514359bd3d6f4a13d373f70abd3a92150714a89
Author: Allen Xiang 
Date:   2016-10-14T13:13:26Z

Replace default X509TrustManager with ReloadableX509TrustManager.




---
If your project is set up for it, you can reply to this email and have your
reply appear on GitHub as well. If your project does not have this feature
enabled and wishes so, or if the feature is enabled but not working, please
contact infrastructure at infrastruct...@apache.org or file a JIRA ticket
with INFRA.
---


[jira] [Commented] (KAFKA-4299) Consumer offsets reset for all topics after increasing partitions for one topic

2016-10-14 Thread Juho Autio (JIRA)

[ 
https://issues.apache.org/jira/browse/KAFKA-4299?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel=15574930#comment-15574930
 ] 

Juho Autio commented on KAFKA-4299:
---

I altered some topics in test environment and no offsets were reset this time. 
I will try altering a topic next week in production to see if it happens again.

> Consumer offsets reset for all topics after increasing partitions for one 
> topic
> ---
>
> Key: KAFKA-4299
> URL: https://issues.apache.org/jira/browse/KAFKA-4299
> Project: Kafka
>  Issue Type: Bug
>Affects Versions: 0.10.0.1
>Reporter: Juho Autio
>
> I increased partitions for one existing topic (2->10), but was surprised to 
> see that it entirely reset the committed offsets of my consumer group.
> All topics & partitions were reset to the earliest offset available, and the 
> consumer read everything again.
> Documentation doesn't mention anything like this. Is this how it's supposed 
> to work, or a bug?
> I would've expected the consumer offsets to not decrease at all, especially 
> for the topics that I didn't even touch.
> For the altered topic I would've expected that consuming the previously 
> existing partitions 0 and 1 would've continued from the position where they 
> were, and naturally starting to read the new added partitions from 0.
> I added partitions according to the "Modifying topics" section of Kafka 
> 0.10.0 Documentation:
> {quote}
> To add partitions you can do
> {code}
>  > bin/kafka-topics.sh --zookeeper $ZOOKEEPER_HOST --alter --topic 
> altered_topic --partitions 10
> {code}
> {quote}
> Previously this topic had 2 partitions.
> For the consumer I'm using 
> {{kafka.javaapi.consumer.ConsumerConnector.createMessageStreamsByFilter()}}.
> And version is:
> {code}
> org.apache.kafka
> kafka_2.11
> 0.10.0.1
> {code}
> Kafka cluster itself is {{kafka_2.11-0.10.0.1}}.
> This is quite problematic because we can't afford waiting for consumers to 
> read the full buffer from the beginning (for all topics!) when increasing 
> partitions for a topic.
> Some possibly relevant settings we have for the consumer:
> {code}
> kafka.partition.assignment.strategy = "range"
> kafka.auto.offset.reset = "smallest"
> kafka.auto.commit.enable = "false"
> kafka.offsets.storage = "kafka"
> kafka.dual.commit.enabled = false
> kafka.consumer.timeout.ms = "2000"
> kafka.auto.create.topics.enable = true
> {code}



--
This message was sent by Atlassian JIRA
(v6.3.4#6332)


[jira] [Commented] (KAFKA-4301) Include some SSL/TLS logging to avoid need need for javax debug util every time an issue arises

2016-10-14 Thread ASF GitHub Bot (JIRA)

[ 
https://issues.apache.org/jira/browse/KAFKA-4301?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel=15574780#comment-15574780
 ] 

ASF GitHub Bot commented on KAFKA-4301:
---

GitHub user rajinisivaram opened a pull request:

https://github.com/apache/kafka/pull/2027

KAFKA-4301: Add more trace for SSL handshake



You can merge this pull request into a Git repository by running:

$ git pull https://github.com/rajinisivaram/kafka KAFKA-4301

Alternatively you can review and apply these changes as the patch at:

https://github.com/apache/kafka/pull/2027.patch

To close this pull request, make a commit to your master/trunk branch
with (at least) the following in the commit message:

This closes #2027


commit 8ef6ad6d7f465224be73166077f5a9394fff48c1
Author: Rajini Sivaram 
Date:   2016-10-14T09:03:05Z

KAFKA-4301: Add more trace for SSL handshake




> Include some SSL/TLS logging to avoid need need for javax debug util every 
> time an issue arises 
> 
>
> Key: KAFKA-4301
> URL: https://issues.apache.org/jira/browse/KAFKA-4301
> Project: Kafka
>  Issue Type: Improvement
>Reporter: Ryan P
>Assignee: Rajini Sivaram
>
> It would be handy to include certain transport layer session attributes in at 
> least the debug level logging within Kafka. Specifically with regard to 
> TLS/SSL communications. 
> Some of the things it would be helpful to see without having to enable the 
> javax network debug utility include: 
> 1. Negotiated cipher suite 
> 2. Authenticated client principal 
> Technically item 2 is covered with the authorizer logging but it would be nice 
> to have this information available even in the absence of an authorizer 
> implementation. 



--
This message was sent by Atlassian JIRA
(v6.3.4#6332)


[GitHub] kafka pull request #2027: KAFKA-4301: Add more trace for SSL handshake

2016-10-14 Thread rajinisivaram
GitHub user rajinisivaram opened a pull request:

https://github.com/apache/kafka/pull/2027

KAFKA-4301: Add more trace for SSL handshake



You can merge this pull request into a Git repository by running:

$ git pull https://github.com/rajinisivaram/kafka KAFKA-4301

Alternatively you can review and apply these changes as the patch at:

https://github.com/apache/kafka/pull/2027.patch

To close this pull request, make a commit to your master/trunk branch
with (at least) the following in the commit message:

This closes #2027


commit 8ef6ad6d7f465224be73166077f5a9394fff48c1
Author: Rajini Sivaram 
Date:   2016-10-14T09:03:05Z

KAFKA-4301: Add more trace for SSL handshake




---
If your project is set up for it, you can reply to this email and have your
reply appear on GitHub as well. If your project does not have this feature
enabled and wishes so, or if the feature is enabled but not working, please
contact infrastructure at infrastruct...@apache.org or file a JIRA ticket
with INFRA.
---


[jira] [Commented] (KAFKA-4294) Allow password file in server.properties to separate 'secrets' from standard configs

2016-10-14 Thread Rajini Sivaram (JIRA)

[ 
https://issues.apache.org/jira/browse/KAFKA-4294?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel=15574737#comment-15574737
 ] 

Rajini Sivaram commented on KAFKA-4294:
---

It may be good to bring this up in KIP-76 discussion:
https://cwiki.apache.org/confluence/display/KAFKA/KIP-76+Enable+getting+password+from+executable+rather+than+passing+as+plaintext+in+config+files

> Allow password file in server.properties to separate 'secrets' from standard 
> configs 
> -
>
> Key: KAFKA-4294
> URL: https://issues.apache.org/jira/browse/KAFKA-4294
> Project: Kafka
>  Issue Type: Improvement
>Reporter: Ryan P
>
> Java's keytool(for Windows) allows you to specify the keystore/truststore 
> password with an external file in addition to a string argument. 
> -storepass:file secret.txt
> http://docs.oracle.com/javase/7/docs/technotes/tools/windows/keytool.html
> It would be nice if Kafka could offer the same functionality allowing 
> organizations to separate concerns between standard configs and 'secrets'. 
> Ideally Kafka would add a secrets file property to the broker config which 
> could override any ssl properties which currently exist within the broker 
> config. Since the secrets file property is only used to override existing 
> SSL/TLS properties the change maintains backward compatibility. 



--
This message was sent by Atlassian JIRA
(v6.3.4#6332)


Jenkins build is back to normal : kafka-trunk-jdk7 #1630

2016-10-14 Thread Apache Jenkins Server
See 



Build failed in Jenkins: kafka-0.10.1-jdk7 #72

2016-10-14 Thread Apache Jenkins Server
See 

--
[...truncated 14144 lines...]
org.apache.kafka.streams.kstream.internals.KeyValuePrinterProcessorTest > 
testPrintKeyValueDefaultSerde PASSED

org.apache.kafka.streams.kstream.internals.KTableSourceTest > 
testSendingOldValue STARTED

org.apache.kafka.streams.kstream.internals.KTableSourceTest > 
testSendingOldValue PASSED

org.apache.kafka.streams.kstream.internals.KTableSourceTest > 
testNotSendingOldValue STARTED

org.apache.kafka.streams.kstream.internals.KTableSourceTest > 
testNotSendingOldValue PASSED

org.apache.kafka.streams.kstream.internals.KTableSourceTest > testKTable STARTED

org.apache.kafka.streams.kstream.internals.KTableSourceTest > testKTable PASSED

org.apache.kafka.streams.kstream.internals.KTableSourceTest > testValueGetter 
STARTED

org.apache.kafka.streams.kstream.internals.KTableSourceTest > testValueGetter 
PASSED

org.apache.kafka.streams.kstream.internals.KTableMapKeysTest > 
testMapKeysConvertingToStream STARTED

org.apache.kafka.streams.kstream.internals.KTableMapKeysTest > 
testMapKeysConvertingToStream PASSED

org.apache.kafka.streams.kstream.internals.KStreamForeachTest > testForeach 
STARTED

org.apache.kafka.streams.kstream.internals.KStreamForeachTest > testForeach 
PASSED

org.apache.kafka.streams.kstream.internals.KTableKTableOuterJoinTest > 
testSendingOldValue STARTED

org.apache.kafka.streams.kstream.internals.KTableKTableOuterJoinTest > 
testSendingOldValue PASSED

org.apache.kafka.streams.kstream.internals.KTableKTableOuterJoinTest > testJoin 
STARTED

org.apache.kafka.streams.kstream.internals.KTableKTableOuterJoinTest > testJoin 
PASSED

org.apache.kafka.streams.kstream.internals.KTableKTableOuterJoinTest > 
testNotSendingOldValue STARTED

org.apache.kafka.streams.kstream.internals.KTableKTableOuterJoinTest > 
testNotSendingOldValue PASSED

org.apache.kafka.streams.kstream.internals.KStreamKStreamJoinTest > 
testOuterJoin STARTED

org.apache.kafka.streams.kstream.internals.KStreamKStreamJoinTest > 
testOuterJoin PASSED

org.apache.kafka.streams.kstream.internals.KStreamKStreamJoinTest > testJoin 
STARTED

org.apache.kafka.streams.kstream.internals.KStreamKStreamJoinTest > testJoin 
PASSED

org.apache.kafka.streams.kstream.internals.KStreamKStreamJoinTest > 
testWindowing STARTED

org.apache.kafka.streams.kstream.internals.KStreamKStreamJoinTest > 
testWindowing PASSED

org.apache.kafka.streams.kstream.internals.KStreamKStreamJoinTest > 
testAsymetricWindowingBefore STARTED

org.apache.kafka.streams.kstream.internals.KStreamKStreamJoinTest > 
testAsymetricWindowingBefore PASSED

org.apache.kafka.streams.kstream.internals.KStreamKStreamJoinTest > 
testAsymetricWindowingAfter STARTED

org.apache.kafka.streams.kstream.internals.KStreamKStreamJoinTest > 
testAsymetricWindowingAfter PASSED

org.apache.kafka.streams.kstream.internals.KStreamFlatMapValuesTest > 
testFlatMapValues STARTED

org.apache.kafka.streams.kstream.internals.KStreamFlatMapValuesTest > 
testFlatMapValues PASSED

org.apache.kafka.streams.kstream.internals.KTableKTableJoinTest > testJoin 
STARTED

org.apache.kafka.streams.kstream.internals.KTableKTableJoinTest > testJoin 
PASSED

org.apache.kafka.streams.kstream.internals.KTableKTableJoinTest > 
testNotSendingOldValues STARTED

org.apache.kafka.streams.kstream.internals.KTableKTableJoinTest > 
testNotSendingOldValues PASSED

org.apache.kafka.streams.kstream.internals.KTableKTableJoinTest > 
testSendingOldValues STARTED

org.apache.kafka.streams.kstream.internals.KTableKTableJoinTest > 
testSendingOldValues PASSED

org.apache.kafka.streams.kstream.internals.KTableAggregateTest > testAggBasic 
STARTED

org.apache.kafka.streams.kstream.internals.KTableAggregateTest > testAggBasic 
PASSED

org.apache.kafka.streams.kstream.internals.KTableAggregateTest > testCount 
STARTED

org.apache.kafka.streams.kstream.internals.KTableAggregateTest > testCount 
PASSED

org.apache.kafka.streams.kstream.internals.KTableAggregateTest > 
testAggCoalesced STARTED

org.apache.kafka.streams.kstream.internals.KTableAggregateTest > 
testAggCoalesced PASSED

org.apache.kafka.streams.kstream.internals.KTableAggregateTest > 
testAggRepartition STARTED

org.apache.kafka.streams.kstream.internals.KTableAggregateTest > 
testAggRepartition PASSED

org.apache.kafka.streams.kstream.internals.KTableAggregateTest > 
testRemoveOldBeforeAddNew STARTED

org.apache.kafka.streams.kstream.internals.KTableAggregateTest > 
testRemoveOldBeforeAddNew PASSED

org.apache.kafka.streams.kstream.internals.KTableAggregateTest > 
testCountCoalesced STARTED

org.apache.kafka.streams.kstream.internals.KTableAggregateTest > 
testCountCoalesced PASSED

org.apache.kafka.streams.kstream.internals.KStreamFilterTest > testFilterNot 
STARTED

org.apache.kafka.streams.kstream.internals.KStreamFilterTest > testFilterNot 
PASSED

org.apache.kafka.streams.kstream.internals.KStreamFilterTest > testFilter 
STARTED


[jira] [Assigned] (KAFKA-4301) Include some SSL/TLS logging to avoid need need for javax debug util every time an issue arises

2016-10-14 Thread Rajini Sivaram (JIRA)

 [ 
https://issues.apache.org/jira/browse/KAFKA-4301?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel
 ]

Rajini Sivaram reassigned KAFKA-4301:
-

Assignee: Rajini Sivaram

> Include some SSL/TLS logging to avoid need need for javax debug util every 
> time an issue arises 
> 
>
> Key: KAFKA-4301
> URL: https://issues.apache.org/jira/browse/KAFKA-4301
> Project: Kafka
>  Issue Type: Improvement
>Reporter: Ryan P
>Assignee: Rajini Sivaram
>
> It would be handy to include certain transport layer session attributes in at 
> least the debug level logging within Kafka. Specifically with regard to 
> TLS/SSL communications. 
> Some of the things it would be helpful to see without having to enable the 
> javax network debug utility include: 
> 1. Negotiated cipher suite 
> 2. Authenticated client principal 
> Technically item2 is covered with the authorizer logging but it would be nice 
> to have this information available even in the absence of an authorizer 
> implementation. 



--
This message was sent by Atlassian JIRA
(v6.3.4#6332)


[jira] [Created] (KAFKA-4302) Simplify KTableSource

2016-10-14 Thread Matthias J. Sax (JIRA)
Matthias J. Sax created KAFKA-4302:
--

 Summary: Simplify KTableSource
 Key: KAFKA-4302
 URL: https://issues.apache.org/jira/browse/KAFKA-4302
 Project: Kafka
  Issue Type: Improvement
  Components: streams
Reporter: Matthias J. Sax
Assignee: Guozhang Wang
Priority: Minor


With the new "interactive queries" feature, source tables are always 
materialized. Thus, we can remove the stale flag {{KTableSource#materialized}} 
(which is always true now) to simplify the code.



--
This message was sent by Atlassian JIRA
(v6.3.4#6332)


[jira] [Updated] (KAFKA-4275) Check of State-Store-assignment to Processor-Nodes is not enabled

2016-10-14 Thread Matthias J. Sax (JIRA)

 [ 
https://issues.apache.org/jira/browse/KAFKA-4275?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel
 ]

Matthias J. Sax updated KAFKA-4275:
---
Status: Patch Available  (was: In Progress)

> Check of State-Store-assignment to Processor-Nodes is not enabled
> -
>
> Key: KAFKA-4275
> URL: https://issues.apache.org/jira/browse/KAFKA-4275
> Project: Kafka
>  Issue Type: Bug
>  Components: streams
>Reporter: Matthias J. Sax
>Assignee: Matthias J. Sax
>
> In {{ProcessorContextImpl#getStateStores()}} we should check if a store was 
> connected to the processor and thus, if the processor is allowed to access 
> the store. This check is currently disabled.



--
This message was sent by Atlassian JIRA
(v6.3.4#6332)


[jira] [Updated] (KAFKA-4275) Check of State-Store-assignment to Processor-Nodes is not enabled

2016-10-14 Thread Matthias J. Sax (JIRA)

 [ 
https://issues.apache.org/jira/browse/KAFKA-4275?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel
 ]

Matthias J. Sax updated KAFKA-4275:
---
Status: In Progress  (was: Patch Available)

> Check of State-Store-assignment to Processor-Nodes is not enabled
> -
>
> Key: KAFKA-4275
> URL: https://issues.apache.org/jira/browse/KAFKA-4275
> Project: Kafka
>  Issue Type: Bug
>  Components: streams
>Reporter: Matthias J. Sax
>Assignee: Matthias J. Sax
>
> In {{ProcessorContextImpl#getStateStores()}} we should check if a store was 
> connected to the processor and thus, if the processor is allowed to access 
> the store. This check is currently disabled.



--
This message was sent by Atlassian JIRA
(v6.3.4#6332)


Build failed in Jenkins: kafka-trunk-jdk8 #978

2016-10-14 Thread Apache Jenkins Server
See 

Changes:

[jason] KAFKA-4254; Update producer's metadata before failing on non-existent

[jason] KAFKA-4298; Ensure compressed message sets are not converted when log

--
[...truncated 14184 lines...]

org.apache.kafka.streams.StreamsConfigTest > 
shouldSupportPrefixedConsumerConfigs STARTED

org.apache.kafka.streams.StreamsConfigTest > 
shouldSupportPrefixedConsumerConfigs PASSED

org.apache.kafka.streams.StreamsConfigTest > testGetProducerConfigs STARTED

org.apache.kafka.streams.StreamsConfigTest > testGetProducerConfigs PASSED

org.apache.kafka.streams.StreamsConfigTest > 
shouldThrowStreamsExceptionIfValueSerdeConfigFails STARTED

org.apache.kafka.streams.StreamsConfigTest > 
shouldThrowStreamsExceptionIfValueSerdeConfigFails PASSED

org.apache.kafka.streams.StreamsConfigTest > 
shouldSupportPrefixedProducerConfigs STARTED

org.apache.kafka.streams.StreamsConfigTest > 
shouldSupportPrefixedProducerConfigs PASSED

org.apache.kafka.streams.StreamsConfigTest > 
shouldSupportPrefixedPropertiesThatAreNotPartOfRestoreConsumerConfig STARTED

org.apache.kafka.streams.StreamsConfigTest > 
shouldSupportPrefixedPropertiesThatAreNotPartOfRestoreConsumerConfig PASSED

org.apache.kafka.streams.StreamsConfigTest > 
shouldSupportPrefixedPropertiesThatAreNotPartOfProducerConfig STARTED

org.apache.kafka.streams.StreamsConfigTest > 
shouldSupportPrefixedPropertiesThatAreNotPartOfProducerConfig PASSED

org.apache.kafka.streams.StreamsConfigTest > 
shouldBeSupportNonPrefixedConsumerConfigs STARTED

org.apache.kafka.streams.StreamsConfigTest > 
shouldBeSupportNonPrefixedConsumerConfigs PASSED

org.apache.kafka.streams.StreamsConfigTest > 
shouldSupportPrefixedPropertiesThatAreNotPartOfConsumerConfig STARTED

org.apache.kafka.streams.StreamsConfigTest > 
shouldSupportPrefixedPropertiesThatAreNotPartOfConsumerConfig PASSED

org.apache.kafka.streams.StreamsConfigTest > 
shouldSupportMultipleBootstrapServers STARTED

org.apache.kafka.streams.StreamsConfigTest > 
shouldSupportMultipleBootstrapServers PASSED

org.apache.kafka.streams.StreamsConfigTest > 
shouldThrowStreamsExceptionIfKeySerdeConfigFails STARTED

org.apache.kafka.streams.StreamsConfigTest > 
shouldThrowStreamsExceptionIfKeySerdeConfigFails PASSED

org.apache.kafka.streams.StreamsConfigTest > 
shouldSupportNonPrefixedProducerConfigs STARTED

org.apache.kafka.streams.StreamsConfigTest > 
shouldSupportNonPrefixedProducerConfigs PASSED

org.apache.kafka.streams.StreamsConfigTest > testGetRestoreConsumerConfigs 
STARTED

org.apache.kafka.streams.StreamsConfigTest > testGetRestoreConsumerConfigs 
PASSED

org.apache.kafka.streams.StreamsConfigTest > 
shouldBeSupportNonPrefixedRestoreConsumerConfigs STARTED

org.apache.kafka.streams.StreamsConfigTest > 
shouldBeSupportNonPrefixedRestoreConsumerConfigs PASSED

org.apache.kafka.streams.StreamsConfigTest > 
shouldSupportPrefixedRestoreConsumerConfigs STARTED

org.apache.kafka.streams.StreamsConfigTest > 
shouldSupportPrefixedRestoreConsumerConfigs PASSED

org.apache.kafka.streams.KafkaStreamsTest > shouldNotGetAllTasksWhenNotRunning 
STARTED

org.apache.kafka.streams.KafkaStreamsTest > shouldNotGetAllTasksWhenNotRunning 
PASSED

org.apache.kafka.streams.KafkaStreamsTest > 
shouldNotGetTaskWithKeyAndPartitionerWhenNotRunning STARTED

org.apache.kafka.streams.KafkaStreamsTest > 
shouldNotGetTaskWithKeyAndPartitionerWhenNotRunning PASSED

org.apache.kafka.streams.KafkaStreamsTest > 
shouldNotGetTaskWithKeyAndSerializerWhenNotRunning STARTED

org.apache.kafka.streams.KafkaStreamsTest > 
shouldNotGetTaskWithKeyAndSerializerWhenNotRunning PASSED

org.apache.kafka.streams.KafkaStreamsTest > 
shouldNotGetAllTasksWithStoreWhenNotRunning STARTED

org.apache.kafka.streams.KafkaStreamsTest > 
shouldNotGetAllTasksWithStoreWhenNotRunning PASSED

org.apache.kafka.streams.KafkaStreamsTest > testCannotStartOnceClosed STARTED

org.apache.kafka.streams.KafkaStreamsTest > testCannotStartOnceClosed PASSED

org.apache.kafka.streams.KafkaStreamsTest > testCleanup STARTED

org.apache.kafka.streams.KafkaStreamsTest > testCleanup PASSED

org.apache.kafka.streams.KafkaStreamsTest > testStartAndClose STARTED

org.apache.kafka.streams.KafkaStreamsTest > testStartAndClose PASSED

org.apache.kafka.streams.KafkaStreamsTest > testCloseIsIdempotent STARTED

org.apache.kafka.streams.KafkaStreamsTest > testCloseIsIdempotent PASSED

org.apache.kafka.streams.KafkaStreamsTest > testCannotCleanupWhileRunning 
STARTED

org.apache.kafka.streams.KafkaStreamsTest > testCannotCleanupWhileRunning PASSED

org.apache.kafka.streams.KafkaStreamsTest > testCannotStartTwice STARTED

org.apache.kafka.streams.KafkaStreamsTest > testCannotStartTwice PASSED

org.apache.kafka.streams.integration.KStreamKTableJoinIntegrationTest > 
shouldCountClicksPerRegion[0] STARTED

org.apache.kafka.streams.integration.KStreamKTableJoinIntegrationTest > 
shouldCountClicksPerRegion[0]