svn commit: r1656965 [3/11] - in /kafka/site/082: ./ javadoc/ javadoc/org/ javadoc/org/apache/ javadoc/org/apache/kafka/ javadoc/org/apache/kafka/clients/ javadoc/org/apache/kafka/clients/producer/ ja

2015-02-03 Thread junrao
Added: kafka/site/082/javadoc/index.html
URL: http://svn.apache.org/viewvc/kafka/site/082/javadoc/index.html?rev=1656965&view=auto
==============================================================================
--- kafka/site/082/javadoc/index.html (added)
+++ kafka/site/082/javadoc/index.html Wed Feb  4 01:11:43 2015
@@ -0,0 +1,73 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Frameset//EN" "http://www.w3.org/TR/html4/frameset.dtd">
+<!--NewPage-->
+<HTML>
+<HEAD>
+<!-- Generated by javadoc on Wed Jan 28 22:31:45 CST 2015-->
+<TITLE>
+clients 0.8.2.0 API
+</TITLE>
+<SCRIPT type="text/javascript">
+    targetPage = "" + window.location.search;
+    if (targetPage != "" && targetPage != "undefined")
+        targetPage = targetPage.substring(1);
+    if (targetPage.indexOf(":") != -1 || (targetPage != "" && !validURL(targetPage)))
+        targetPage = "undefined";
+    function validURL(url) {
+        var pos = url.indexOf(".html");
+        if (pos == -1 || pos != url.length - 5)
+            return false;
+        var allowNumber = false;
+        var allowSep = false;
+        var seenDot = false;
+        for (var i = 0; i < url.length - 5; i++) {
+            var ch = url.charAt(i);
+            if ('a' <= ch && ch <= 'z' ||
+                    'A' <= ch && ch <= 'Z' ||
+                    ch == '$' ||
+                    ch == '_') {
+                allowNumber = true;
+                allowSep = true;
+            } else if ('0' <= ch && ch <= '9'
+                    || ch == '-') {
+                if (!allowNumber)
+                    return false;
+            } else if (ch == '/' || ch == '.') {
+                if (!allowSep)
+                    return false;
+                allowNumber = false;
+                allowSep = false;
+                if (ch == '.')
+                    seenDot = true;
+                if (ch == '/' && seenDot)
+                    return false;
+            } else {
+                return false;
+            }
+        }
+        return true;
+    }
+    function loadFrames() {
+        if (targetPage != "" && targetPage != "undefined")
+            top.classFrame.location = top.targetPage;
+    }
+</SCRIPT>
+<NOSCRIPT>
+</NOSCRIPT>
+</HEAD>
+<FRAMESET cols="20%,80%" title="" onLoad="top.loadFrames()">
+<FRAMESET rows="30%,70%" title="" onLoad="top.loadFrames()">
+<FRAME src="overview-frame.html" name="packageListFrame" title="All Packages">
+<FRAME src="allclasses-frame.html" name="packageFrame" title="All classes and interfaces (except non-static nested types)">
+</FRAMESET>
+<FRAME src="overview-summary.html" name="classFrame" title="Package, class and interface descriptions" scrolling="yes">
+<NOFRAMES>
+<H2>
+Frame Alert</H2>
+
+<P>
+This document is designed to be viewed using the frames feature. If you see this message, you are using a non-frame-capable web client.
+<BR>
+Link to<A HREF="overview-summary.html">Non-frame version.</A>
+</NOFRAMES>
+</FRAMESET>
+</HTML>
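For reference, the loader script above only ever redirects to relative ".html" targets; validURL() walks the query string character by character to enforce that. A direct Java transcription of the same check, included purely as illustration (not part of the commit):

public class ValidUrlCheck {
    // Accepts only strings ending in ".html" whose segments start with a letter,
    // '$', or '_', allow digits and '-' only after such a character, and never
    // contain a '/' once a '.' has been seen.
    static boolean validURL(String url) {
        int pos = url.indexOf(".html");
        if (pos == -1 || pos != url.length() - 5)
            return false;
        boolean allowNumber = false;
        boolean allowSep = false;
        boolean seenDot = false;
        for (int i = 0; i < url.length() - 5; i++) {
            char ch = url.charAt(i);
            if (('a' <= ch && ch <= 'z') || ('A' <= ch && ch <= 'Z') || ch == '$' || ch == '_') {
                allowNumber = true;
                allowSep = true;
            } else if (('0' <= ch && ch <= '9') || ch == '-') {
                if (!allowNumber) return false;
            } else if (ch == '/' || ch == '.') {
                if (!allowSep) return false;
                allowNumber = false;
                allowSep = false;
                if (ch == '.') seenDot = true;
                if (ch == '/' && seenDot) return false;
            } else {
                return false;
            }
        }
        return true;
    }

    public static void main(String[] args) {
        System.out.println(validURL("org/apache/kafka/clients/producer/KafkaProducer.html")); // true
        System.out.println(validURL("http://example.com/x.html")); // false: ':' is not an allowed character
    }
}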

Added: 
kafka/site/082/javadoc/org/apache/kafka/clients/producer/BufferExhaustedException.html
URL: http://svn.apache.org/viewvc/kafka/site/082/javadoc/org/apache/kafka/clients/producer/BufferExhaustedException.html?rev=1656965&view=auto
==============================================================================
--- 
kafka/site/082/javadoc/org/apache/kafka/clients/producer/BufferExhaustedException.html
 (added)
+++ 
kafka/site/082/javadoc/org/apache/kafka/clients/producer/BufferExhaustedException.html
 Wed Feb  4 01:11:43 2015
@@ -0,0 +1,245 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
+<!--NewPage-->
+<HTML>
+<HEAD>
+<!-- Generated by javadoc (build 1.6.0_65) on Wed Jan 28 22:31:45 CST 2015 -->
+<TITLE>
+BufferExhaustedException (clients 0.8.2.0 API)
+</TITLE>
+
+<META NAME="date" CONTENT="2015-01-28">
+
+<LINK REL ="stylesheet" TYPE="text/css" HREF="../../../../../stylesheet.css" TITLE="Style">
+
+<SCRIPT type="text/javascript">
+function windowTitle()
+{
+    if (location.href.indexOf('is-external=true') == -1) {
+        parent.document.title="BufferExhaustedException (clients 0.8.2.0 API)";
+    }
+}
+</SCRIPT>
+<NOSCRIPT>
+</NOSCRIPT>
+
+</HEAD>
+
+<BODY BGCOLOR="white" onload="windowTitle();">
+<HR>
+
+
+<!-- ========= START OF TOP NAVBAR ======= -->
+<A NAME="navbar_top"><!-- --></A>
+<A HREF="#skip-navbar_top" title="Skip navigation links"></A>
+<TABLE BORDER="0" WIDTH="100%" CELLPADDING="1" CELLSPACING="0" SUMMARY="">
+<TR>
+<TD COLSPAN=2 BGCOLOR="#EEEEFF" CLASS="NavBarCell1">
+<A NAME="navbar_top_firstrow"><!-- --></A>
+<TABLE BORDER="0" CELLPADDING="0" CELLSPACING="3" SUMMARY="">
+  <TR ALIGN="center" VALIGN="top">
+  <TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1">    <A HREF="../../../../../overview-summary.html"><FONT CLASS="NavBarFont1"><B>Overview</B></FONT></A>&nbsp;</TD>
+  <TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1">    <A HREF="package-summary.html"><FONT CLASS="NavBarFont1"><B>Package</B></FONT></A>&nbsp;</TD>
+  <TD BGCOLOR="#FFFFFF" CLASS="NavBarCell1Rev"> &nbsp;<FONT CLASS="NavBarFont1Rev"><B>Class</B></FONT>&nbsp;</TD>
+  <TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1">    <A HREF="package-tree.html"><FONT CLASS="NavBarFont1"><B>Tree</B></FONT></A>&nbsp;</TD>
+  <TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1">    <A 
svn commit: r1656965 [9/11] - in /kafka/site/082: ./ javadoc/ javadoc/org/ javadoc/org/apache/ javadoc/org/apache/kafka/ javadoc/org/apache/kafka/clients/ javadoc/org/apache/kafka/clients/producer/ ja

2015-02-03 Thread junrao
Added: 
kafka/site/082/javadoc/org/apache/kafka/common/errors/TimeoutException.html
URL: http://svn.apache.org/viewvc/kafka/site/082/javadoc/org/apache/kafka/common/errors/TimeoutException.html?rev=1656965&view=auto
==============================================================================
--- kafka/site/082/javadoc/org/apache/kafka/common/errors/TimeoutException.html 
(added)
+++ kafka/site/082/javadoc/org/apache/kafka/common/errors/TimeoutException.html 
Wed Feb  4 01:11:43 2015
@@ -0,0 +1,299 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
+<!--NewPage-->
+<HTML>
+<HEAD>
+<!-- Generated by javadoc (build 1.6.0_65) on Wed Jan 28 22:31:45 CST 2015 -->
+<TITLE>
+TimeoutException (clients 0.8.2.0 API)
+</TITLE>
+
+<META NAME="date" CONTENT="2015-01-28">
+
+<LINK REL ="stylesheet" TYPE="text/css" HREF="../../../../../stylesheet.css" TITLE="Style">
+
+<SCRIPT type="text/javascript">
+function windowTitle()
+{
+    if (location.href.indexOf('is-external=true') == -1) {
+        parent.document.title="TimeoutException (clients 0.8.2.0 API)";
+    }
+}
+</SCRIPT>
+<NOSCRIPT>
+</NOSCRIPT>
+
+</HEAD>
+
+<BODY BGCOLOR="white" onload="windowTitle();">
+<HR>
+
+
+<!-- ========= START OF TOP NAVBAR ======= -->
+<A NAME="navbar_top"><!-- --></A>
+<A HREF="#skip-navbar_top" title="Skip navigation links"></A>
+<TABLE BORDER="0" WIDTH="100%" CELLPADDING="1" CELLSPACING="0" SUMMARY="">
+<TR>
+<TD COLSPAN=2 BGCOLOR="#EEEEFF" CLASS="NavBarCell1">
+<A NAME="navbar_top_firstrow"><!-- --></A>
+<TABLE BORDER="0" CELLPADDING="0" CELLSPACING="3" SUMMARY="">
+  <TR ALIGN="center" VALIGN="top">
+  <TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1">    <A HREF="../../../../../overview-summary.html"><FONT CLASS="NavBarFont1"><B>Overview</B></FONT></A>&nbsp;</TD>
+  <TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1">    <A HREF="package-summary.html"><FONT CLASS="NavBarFont1"><B>Package</B></FONT></A>&nbsp;</TD>
+  <TD BGCOLOR="#FFFFFF" CLASS="NavBarCell1Rev"> &nbsp;<FONT CLASS="NavBarFont1Rev"><B>Class</B></FONT>&nbsp;</TD>
+  <TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1">    <A HREF="package-tree.html"><FONT CLASS="NavBarFont1"><B>Tree</B></FONT></A>&nbsp;</TD>
+  <TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1">    <A HREF="../../../../../deprecated-list.html"><FONT CLASS="NavBarFont1"><B>Deprecated</B></FONT></A>&nbsp;</TD>
+  <TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1">    <A HREF="../../../../../index-all.html"><FONT CLASS="NavBarFont1"><B>Index</B></FONT></A>&nbsp;</TD>
+  <TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1">    <A HREF="../../../../../help-doc.html"><FONT CLASS="NavBarFont1"><B>Help</B></FONT></A>&nbsp;</TD>
+  </TR>
+</TABLE>
+</TD>
+<TD ALIGN="right" VALIGN="top" ROWSPAN=3><EM>
+</EM>
+</TD>
+</TR>
+
+<TR>
+<TD BGCOLOR="white" CLASS="NavBarCell2"><FONT SIZE="-2">
+&nbsp;<A HREF="../../../../../org/apache/kafka/common/errors/SerializationException.html" title="class in org.apache.kafka.common.errors"><B>PREV CLASS</B></A>&nbsp;
+&nbsp;<A HREF="../../../../../org/apache/kafka/common/errors/UnknownServerException.html" title="class in org.apache.kafka.common.errors"><B>NEXT CLASS</B></A></FONT></TD>
+<TD BGCOLOR="white" CLASS="NavBarCell2"><FONT SIZE="-2">
+  <A HREF="../../../../../index.html?org/apache/kafka/common/errors/TimeoutException.html" target="_top"><B>FRAMES</B></A>  &nbsp;
+&nbsp;<A HREF="TimeoutException.html" target="_top"><B>NO FRAMES</B></A>  &nbsp;
+&nbsp;<SCRIPT type="text/javascript">
+  <!--
+  if(window==top) {
+    document.writeln('<A HREF="../../../../../allclasses-noframe.html"><B>All Classes</B></A>');
+  }
+  //-->
+</SCRIPT>
+<NOSCRIPT>
+  <A HREF="../../../../../allclasses-noframe.html"><B>All Classes</B></A>
+</NOSCRIPT>
+
+
+</FONT></TD>
+</TR>
+<TR>
+<TD VALIGN="top" CLASS="NavBarCell3"><FONT SIZE="-2">
+  SUMMARY:&nbsp;NESTED&nbsp;|&nbsp;FIELD&nbsp;|&nbsp;<A HREF="#constructor_summary">CONSTR</A>&nbsp;|&nbsp;<A HREF="#methods_inherited_from_class_org.apache.kafka.common.errors.ApiException">METHOD</A></FONT></TD>
+<TD VALIGN="top" CLASS="NavBarCell3"><FONT SIZE="-2">
+DETAIL:&nbsp;FIELD&nbsp;|&nbsp;<A HREF="#constructor_detail">CONSTR</A>&nbsp;|&nbsp;METHOD</FONT></TD>
+</TR>
+</TABLE>
+<A NAME="skip-navbar_top"></A>
+<!-- ========= END OF TOP NAVBAR ========= -->
+
+<HR>
+<!-- ======== START OF CLASS DATA ======== -->
+<H2>
+<FONT SIZE="-1">
+org.apache.kafka.common.errors</FONT>
+<BR>
+Class TimeoutException</H2>
+<PRE>
+java.lang.Object
+  <IMG SRC="../../../../../resources/inherit.gif" ALT="extended by ">java.lang.Throwable
+      <IMG SRC="../../../../../resources/inherit.gif" ALT="extended by ">java.lang.Exception
+          <IMG SRC="../../../../../resources/inherit.gif" ALT="extended by ">java.lang.RuntimeException
+              <IMG SRC="../../../../../resources/inherit.gif" ALT="extended by "><A HREF="../../../../../org/apache/kafka/common/KafkaException.html" title="class in org.apache.kafka.common">org.apache.kafka.common.KafkaException</A>
+                  <IMG SRC="../../../../../resources/inherit.gif" ALT="extended by "><A HREF="../../../../../org/apache/kafka/common/errors/ApiException.html" title="class in org.apache.kafka.common.errors">org.apache.kafka.common.errors.ApiException</A>
+                      <IMG SRC="../../../../../resources/inherit.gif" ALT="extended by "><A HREF="../../../../../org/apache/kafka/common/errors/RetriableException.html" title="class in org.apache.kafka.common.errors">org.apache.kafka.common.errors.RetriableException</A>
+                          
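For reference, the hierarchy shown above makes TimeoutException a RetriableException, which is the contract client code can key on. A minimal sketch (the producer and record are placeholders; this is not code from the commit):

import java.util.concurrent.ExecutionException;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.errors.RetriableException;

public class RetryOnTimeout {
    // Send synchronously and classify a failure as transient or permanent.
    static void sendOnce(KafkaProducer<byte[], byte[]> producer,
                         ProducerRecord<byte[], byte[]> record) throws InterruptedException {
        try {
            producer.send(record).get();
        } catch (ExecutionException e) {
            if (e.getCause() instanceof RetriableException) {
                // TimeoutException lands here: transient, reasonable to retry.
                System.err.println("transient failure, may retry: " + e.getCause());
            } else {
                System.err.println("permanent failure: " + e.getCause());
            }
        }
    }
}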

svn commit: r1656965 [1/11] - in /kafka/site/082: ./ javadoc/ javadoc/org/ javadoc/org/apache/ javadoc/org/apache/kafka/ javadoc/org/apache/kafka/clients/ javadoc/org/apache/kafka/clients/producer/ ja

2015-02-03 Thread junrao
Author: junrao
Date: Wed Feb  4 01:11:43 2015
New Revision: 1656965

URL: http://svn.apache.org/r1656965
Log:
update api for 082

Added:
kafka/site/082/javadoc/
kafka/site/082/javadoc/allclasses-frame.html
kafka/site/082/javadoc/allclasses-noframe.html
kafka/site/082/javadoc/constant-values.html
kafka/site/082/javadoc/deprecated-list.html
kafka/site/082/javadoc/help-doc.html
kafka/site/082/javadoc/index-all.html
kafka/site/082/javadoc/index.html
kafka/site/082/javadoc/org/
kafka/site/082/javadoc/org/apache/
kafka/site/082/javadoc/org/apache/kafka/
kafka/site/082/javadoc/org/apache/kafka/clients/
kafka/site/082/javadoc/org/apache/kafka/clients/producer/

kafka/site/082/javadoc/org/apache/kafka/clients/producer/BufferExhaustedException.html
kafka/site/082/javadoc/org/apache/kafka/clients/producer/Callback.html
kafka/site/082/javadoc/org/apache/kafka/clients/producer/KafkaProducer.html
kafka/site/082/javadoc/org/apache/kafka/clients/producer/MockProducer.html
kafka/site/082/javadoc/org/apache/kafka/clients/producer/Producer.html
kafka/site/082/javadoc/org/apache/kafka/clients/producer/ProducerConfig.html
kafka/site/082/javadoc/org/apache/kafka/clients/producer/ProducerRecord.html
kafka/site/082/javadoc/org/apache/kafka/clients/producer/RecordMetadata.html
kafka/site/082/javadoc/org/apache/kafka/clients/producer/package-frame.html

kafka/site/082/javadoc/org/apache/kafka/clients/producer/package-summary.html
kafka/site/082/javadoc/org/apache/kafka/clients/producer/package-tree.html
kafka/site/082/javadoc/org/apache/kafka/common/
kafka/site/082/javadoc/org/apache/kafka/common/Cluster.html
kafka/site/082/javadoc/org/apache/kafka/common/Configurable.html
kafka/site/082/javadoc/org/apache/kafka/common/KafkaException.html
kafka/site/082/javadoc/org/apache/kafka/common/Metric.html
kafka/site/082/javadoc/org/apache/kafka/common/MetricName.html
kafka/site/082/javadoc/org/apache/kafka/common/Node.html
kafka/site/082/javadoc/org/apache/kafka/common/PartitionInfo.html
kafka/site/082/javadoc/org/apache/kafka/common/TopicPartition.html
kafka/site/082/javadoc/org/apache/kafka/common/errors/
kafka/site/082/javadoc/org/apache/kafka/common/errors/ApiException.html

kafka/site/082/javadoc/org/apache/kafka/common/errors/CorruptRecordException.html

kafka/site/082/javadoc/org/apache/kafka/common/errors/InvalidMetadataException.html

kafka/site/082/javadoc/org/apache/kafka/common/errors/InvalidTopicException.html

kafka/site/082/javadoc/org/apache/kafka/common/errors/LeaderNotAvailableException.html
kafka/site/082/javadoc/org/apache/kafka/common/errors/NetworkException.html

kafka/site/082/javadoc/org/apache/kafka/common/errors/NotEnoughReplicasAfterAppendException.html

kafka/site/082/javadoc/org/apache/kafka/common/errors/NotEnoughReplicasException.html

kafka/site/082/javadoc/org/apache/kafka/common/errors/NotLeaderForPartitionException.html

kafka/site/082/javadoc/org/apache/kafka/common/errors/OffsetMetadataTooLarge.html

kafka/site/082/javadoc/org/apache/kafka/common/errors/OffsetOutOfRangeException.html

kafka/site/082/javadoc/org/apache/kafka/common/errors/RecordBatchTooLargeException.html

kafka/site/082/javadoc/org/apache/kafka/common/errors/RecordTooLargeException.html

kafka/site/082/javadoc/org/apache/kafka/common/errors/RetriableException.html

kafka/site/082/javadoc/org/apache/kafka/common/errors/SerializationException.html
kafka/site/082/javadoc/org/apache/kafka/common/errors/TimeoutException.html

kafka/site/082/javadoc/org/apache/kafka/common/errors/UnknownServerException.html

kafka/site/082/javadoc/org/apache/kafka/common/errors/UnknownTopicOrPartitionException.html
kafka/site/082/javadoc/org/apache/kafka/common/errors/package-frame.html
kafka/site/082/javadoc/org/apache/kafka/common/errors/package-summary.html
kafka/site/082/javadoc/org/apache/kafka/common/errors/package-tree.html
kafka/site/082/javadoc/org/apache/kafka/common/package-frame.html
kafka/site/082/javadoc/org/apache/kafka/common/package-summary.html
kafka/site/082/javadoc/org/apache/kafka/common/package-tree.html
kafka/site/082/javadoc/org/apache/kafka/common/serialization/

kafka/site/082/javadoc/org/apache/kafka/common/serialization/ByteArrayDeserializer.html

kafka/site/082/javadoc/org/apache/kafka/common/serialization/ByteArraySerializer.html

kafka/site/082/javadoc/org/apache/kafka/common/serialization/Deserializer.html
kafka/site/082/javadoc/org/apache/kafka/common/serialization/Serializer.html

kafka/site/082/javadoc/org/apache/kafka/common/serialization/StringDeserializer.html

kafka/site/082/javadoc/org/apache/kafka/common/serialization/StringSerializer.html

kafka/site/082/javadoc/org/apache/kafka/common/serialization/package-frame.html

kafka/site/082/javadoc/org/apache

svn commit: r1656967 - in /kafka/site: 082/api.html documentation.html downloads.html

2015-02-03 Thread junrao
Author: junrao
Date: Wed Feb  4 01:35:26 2015
New Revision: 1656967

URL: http://svn.apache.org/r1656967
Log:
flip the main doc to 082

Modified:
kafka/site/082/api.html
kafka/site/documentation.html
kafka/site/downloads.html

Modified: kafka/site/082/api.html
URL: http://svn.apache.org/viewvc/kafka/site/082/api.html?rev=1656967&r1=1656966&r2=1656967&view=diff
==============================================================================
--- kafka/site/082/api.html (original)
+++ kafka/site/082/api.html Wed Feb  4 01:35:26 2015
@@ -38,7 +38,7 @@ You can follow
 <a href="javadoc/org/apache/kafka/clients/producer/KafkaProducer.html" title="Kafka 0.8.2 producer example">this example</a> to learn how to use the producer api.
 
 <p>
-The old producer api can be found at <a href="http://kafka.apache.org/081/documentation.html#producerapi">
+The old producer api can be found <a href="http://kafka.apache.org/081/documentation.html#producerapi">
 here</a>. However, we encourage everyone to migrate to the new producer.
 </p>
 

Modified: kafka/site/documentation.html
URL: http://svn.apache.org/viewvc/kafka/site/documentation.html?rev=1656967&r1=1656966&r2=1656967&view=diff
==============================================================================
--- kafka/site/documentation.html (original)
+++ kafka/site/documentation.html Wed Feb  4 01:35:26 2015
@@ -1,2 +1,2 @@
 <!-- should always link the the latest release's documentation -->
-<!--#include virtual="081/documentation.html" -->
\ No newline at end of file
+<!--#include virtual="082/documentation.html" -->
\ No newline at end of file

Modified: kafka/site/downloads.html
URL: http://svn.apache.org/viewvc/kafka/site/downloads.html?rev=1656967&r1=1656966&r2=1656967&view=diff
==============================================================================
--- kafka/site/downloads.html (original)
+++ kafka/site/downloads.html Wed Feb  4 01:35:26 2015
@@ -1,10 +1,33 @@
 <!--#include virtual="includes/header.html" -->
 
 <h1>Releases</h1>
-0.8.2-beta is the latest release. The current stable version is 0.8.1.1. 
+0.8.2.0 is the latest release. The current stable version is 0.8.2.0. 
 
 <p>
-You can verify your download by following these <a href="http://www.apache.org/info/verification.html">procedures</a> and using these <a href="http://svn.apache.org/repos/asf/kafka/KEYS">KEYS</a>.
+You can verify your download by following these <a href="http://www.apache.org/info/verification.html">procedures</a> and using these <a href="http://kafka.apache.org/KEYS">KEYS</a>.
+<h3>0.8.2.0</h3>
+<ul>
+  <li>
+    <a href="https://archive.apache.org/dist/kafka/0.8.2.0/RELEASE_NOTES.html">Release Notes</a>
+  </li>
+   <li>
+    Source download: <a href="https://www.apache.org/dyn/closer.cgi?path=/kafka/0.8.2.0/kafka-0.8.2.0-src.tgz">kafka-0.8.2.0-src.tgz</a> (<a href="https://dist.apache.org/repos/dist/release/kafka/0.8.2.0/kafka-0.8.2.0-src.tgz.asc">asc</a>, <a href="https://dist.apache.org/repos/dist/release/kafka/0.8.2.0/kafka-0.8.2.0-src.tgz.md5">md5</a>) 
+  </li>
+   <li>
+    Binary downloads:
+    <ul>
+      <li>Scala 2.9.1 - <a href="https://www.apache.org/dyn/closer.cgi?path=/kafka/0.8.2.0/kafka_2.9.1-0.8.2.0.tgz">kafka_2.9.1-0.8.2.0.tgz</a> (<a href="https://dist.apache.org/repos/dist/release/kafka/0.8.2.0/kafka_2.9.1-0.8.2.0.tgz.asc">asc</a>, <a href="https://dist.apache.org/repos/dist/release/kafka/0.8.2.0/kafka_2.9.1-0.8.2.0.tgz.md5">md5</a>) 
+      </li>
+      <li>Scala 2.9.2 - <a href="https://www.apache.org/dyn/closer.cgi?path=/kafka/0.8.2.0/kafka_2.9.2-0.8.2.0.tgz">kafka_2.9.2-0.8.2.0.tgz</a> (<a href="https://dist.apache.org/repos/dist/release/kafka/0.8.2.0/kafka_2.9.2-0.8.2.0.tgz.asc">asc</a>, <a href="https://dist.apache.org/repos/dist/release/kafka/0.8.2.0/kafka_2.9.2-0.8.2.0.tgz.md5">md5</a>) 
+      </li>
+      <li>Scala 2.10 &nbsp;- <a href="https://www.apache.org/dyn/closer.cgi?path=/kafka/0.8.2.0/kafka_2.10-0.8.2.0.tgz">kafka_2.10-0.8.2.0.tgz</a> (<a href="https://dist.apache.org/repos/dist/release/kafka/0.8.2.0/kafka_2.10-0.8.2.0.tgz.asc">asc</a>, <a href="https://dist.apache.org/repos/dist/release/kafka/0.8.2.0/kafka_2.10-0.8.2.0.tgz.md5">md5</a>) 
+      </li>
+      <li>Scala 2.11 &nbsp;- <a href="https://www.apache.org/dyn/closer.cgi?path=/kafka/0.8.2.0/kafka_2.11-0.8.2.0.tgz">kafka_2.11-0.8.2.0.tgz</a> (<a href="https://dist.apache.org/repos/dist/release/kafka/0.8.2.0/kafka_2.11-0.8.2.0.tgz.asc">asc</a>, <a href="https://dist.apache.org/repos/dist/release/kafka/0.8.2.0/kafka_2.11-0.8.2.0.tgz.md5">md5</a>) 
+      </li>
+    </ul>
+We build for multiple versions of Scala. This only matters if you are using Scala and you want a version built for the same Scala version you use. Otherwise any version should work (2.10 is recommended).
+  </li>
+</ul>
 <h3>0.8.2-beta</h3>
 <ul>
   <li>



svn commit: r1656968 - /kafka/site/082/api.html

2015-02-03 Thread junrao
Author: junrao
Date: Wed Feb  4 01:40:13 2015
New Revision: 1656968

URL: http://svn.apache.org/r1656968
Log:
fix javadoc link from api page

Modified:
kafka/site/082/api.html

Modified: kafka/site/082/api.html
URL: http://svn.apache.org/viewvc/kafka/site/082/api.html?rev=1656968&r1=1656967&r2=1656968&view=diff
==============================================================================
--- kafka/site/082/api.html (original)
+++ kafka/site/082/api.html Wed Feb  4 01:40:13 2015
@@ -35,7 +35,7 @@ public interface Producer<K,V> extends C
  
 </pre>
 You can follow
-<a href="javadoc/org/apache/kafka/clients/producer/KafkaProducer.html" title="Kafka 0.8.2 producer example">this example</a> to learn how to use the producer api.
+<a href="http://kafka.apache.org/082/javadoc/org/apache/kafka/clients/producer/KafkaProducer.html" title="Kafka 0.8.2 producer example">this example</a> to learn how to use the producer api.
 
 <p>
 The old producer api can be found <a href="http://kafka.apache.org/081/documentation.html#producerapi">




kafka git commit: KAFKA-1723 (delta patch to fix javadoc); make the metrics name in new producer more standard; patched by Manikumar Reddy; reviewed by Jun Rao

2015-01-14 Thread junrao
Repository: kafka
Updated Branches:
  refs/heads/trunk bfb2da3c8 -> a61117840


KAFKA-1723 (delta patch to fix javadoc); make the metrics name in new producer 
more standard; patched by Manikumar Reddy; reviewed by Jun Rao


Project: http://git-wip-us.apache.org/repos/asf/kafka/repo
Commit: http://git-wip-us.apache.org/repos/asf/kafka/commit/a6111784
Tree: http://git-wip-us.apache.org/repos/asf/kafka/tree/a6111784
Diff: http://git-wip-us.apache.org/repos/asf/kafka/diff/a6111784

Branch: refs/heads/trunk
Commit: a611178408cf8497054ff015caba18cfcff70a60
Parents: bfb2da3
Author: Manikumar Reddy manikumar.re...@gmail.com
Authored: Wed Jan 14 12:02:50 2015 -0800
Committer: Jun Rao jun...@gmail.com
Committed: Wed Jan 14 12:02:50 2015 -0800

--
 .../kafka/clients/producer/KafkaProducer.java   | 22 +-
 .../org/apache/kafka/common/MetricName.java | 24 
 2 files changed, 26 insertions(+), 20 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/kafka/blob/a6111784/clients/src/main/java/org/apache/kafka/clients/producer/KafkaProducer.java
--
diff --git 
a/clients/src/main/java/org/apache/kafka/clients/producer/KafkaProducer.java 
b/clients/src/main/java/org/apache/kafka/clients/producer/KafkaProducer.java
index c79149a..fc71710 100644
--- a/clients/src/main/java/org/apache/kafka/clients/producer/KafkaProducer.java
+++ b/clients/src/main/java/org/apache/kafka/clients/producer/KafkaProducer.java
@@ -265,32 +265,32 @@ public class KafkaProducer<K,V> implements Producer<K,V> {
  * <p>
  * If you want to simulate a simple blocking call you can do the following:
  * 
- * <pre>
- *   producer.send(new ProducerRecord<byte[],byte[]>("the-topic", "key".getBytes(), "value".getBytes())).get();
- * </pre>
+ * <pre>{@code
+ * producer.send(new ProducerRecord<byte[],byte[]>("the-topic", "key".getBytes(), "value".getBytes())).get();
+ * }</pre>
  * <p>
  * Those desiring fully non-blocking usage can make use of the {@link Callback} parameter to provide a callback that
  * will be invoked when the request is complete.
  * 
- * <pre>
- *   ProducerRecord<byte[],byte[]> record = new ProducerRecord<byte[],byte[]>("the-topic", "key".getBytes(), "value".getBytes());
+ * <pre>{@code
+ * ProducerRecord<byte[],byte[]> record = new ProducerRecord<byte[],byte[]>("the-topic", "key".getBytes(), "value".getBytes());
  *   producer.send(myRecord,
- *               new Callback() {
+ *                new Callback() {
  *                   public void onCompletion(RecordMetadata metadata, Exception e) {
  *                       if(e != null)
  *                           e.printStackTrace();
  *                       System.out.println("The offset of the record we just sent is: " + metadata.offset());
  *                   }
- *               });
- * </pre>
+ *                });
+ * }</pre>
  * 
  * Callbacks for records being sent to the same partition are guaranteed to execute in order. That is, in the
  * following example <code>callback1</code> is guaranteed to execute before <code>callback2</code>:
  * 
- * <pre>
- * producer.send(new ProducerRecord<byte[],byte[]>(topic, partition, key, value), callback1);
+ * <pre>{@code
+ * producer.send(new ProducerRecord<byte[],byte[]>(topic, partition, key1, value1), callback1);
  * producer.send(new ProducerRecord<byte[],byte[]>(topic, partition, key2, value2), callback2);
- * </pre>
+ * }</pre>
  * <p>
  * Note that callbacks will generally execute in the I/O thread of the producer and so should be reasonably fast or
  * they will delay the sending of messages from other threads. If you want to execute blocking or computationally

http://git-wip-us.apache.org/repos/asf/kafka/blob/a6111784/clients/src/main/java/org/apache/kafka/common/MetricName.java
--
diff --git a/clients/src/main/java/org/apache/kafka/common/MetricName.java 
b/clients/src/main/java/org/apache/kafka/common/MetricName.java
index 4e810d5..7e977e9 100644
--- a/clients/src/main/java/org/apache/kafka/common/MetricName.java
+++ b/clients/src/main/java/org/apache/kafka/common/MetricName.java
@@ -19,34 +19,40 @@ import org.apache.kafka.common.utils.Utils;
 
 /**
  * The <code>MetricName</code> class encapsulates a metric's name, logical group and its related attributes
- * <p/>
+ * <p>
  * This class captures the following parameters
  * <pre>
  *  <b>name</b> The name of the metric
  *  <b>group</b> logical group name of the metrics to which this metric belongs.
  *  <b>description</b> A human-readable description to include in the metric. This is optional.
  *  <b>tags</b> additional key/value attributes of the metric. This is optional.
- 
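The truncated MetricName javadoc above lists the name/group/description/tags parameters; the following sketch shows them in use against the 0.8.2 clients metrics API (the sensor name, metric names, and tag values are made up for the example, and the constructor overloads are assumed from the javadoc above):

import java.util.HashMap;
import java.util.Map;
import org.apache.kafka.common.MetricName;
import org.apache.kafka.common.metrics.Metrics;
import org.apache.kafka.common.metrics.Sensor;
import org.apache.kafka.common.metrics.stats.Avg;

public class MetricNameExample {
    public static void main(String[] args) {
        Metrics metrics = new Metrics(); // repository of metrics and sensors
        Sensor sensor = metrics.sensor("message-sizes");

        // name + group, plus the optional description and key/value tags
        Map<String, String> tags = new HashMap<String, String>();
        tags.put("client-id", "producer-1");
        MetricName name = new MetricName("message-size-avg", "producer-metrics",
                                         "average message size", tags);
        sensor.add(name, new Avg());
        metrics.close();
    }
}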

kafka git commit: KAFKA-1723; (followup patch to fix javadoc for java 8) make the metrics name in new producer more standard; patched by Manikumar Reddy; reviewed by Jun Rao

2015-01-16 Thread junrao
Repository: kafka
Updated Branches:
  refs/heads/0.8.2 17c8bdcbb -> 988e695fa


KAFKA-1723; (followup patch to fix javadoc for java 8) make the metrics name in 
new producer more standard; patched by Manikumar Reddy; reviewed by Jun Rao


Project: http://git-wip-us.apache.org/repos/asf/kafka/repo
Commit: http://git-wip-us.apache.org/repos/asf/kafka/commit/988e695f
Tree: http://git-wip-us.apache.org/repos/asf/kafka/tree/988e695f
Diff: http://git-wip-us.apache.org/repos/asf/kafka/diff/988e695f

Branch: refs/heads/0.8.2
Commit: 988e695fa294c5e9799509f6313b95e1f96682da
Parents: 17c8bdc
Author: Manikumar Reddy manikumar.re...@gmail.com
Authored: Fri Jan 16 18:09:42 2015 -0800
Committer: Jun Rao jun...@gmail.com
Committed: Fri Jan 16 18:09:42 2015 -0800

--
 .../kafka/clients/producer/KafkaProducer.java   | 34 ++--
 .../kafka/clients/producer/MockProducer.java|  8 ++---
 .../org/apache/kafka/common/MetricName.java | 24 --
 3 files changed, 36 insertions(+), 30 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/kafka/blob/988e695f/clients/src/main/java/org/apache/kafka/clients/producer/KafkaProducer.java
--
diff --git 
a/clients/src/main/java/org/apache/kafka/clients/producer/KafkaProducer.java 
b/clients/src/main/java/org/apache/kafka/clients/producer/KafkaProducer.java
index 0bfda4b..30477d7 100644
--- a/clients/src/main/java/org/apache/kafka/clients/producer/KafkaProducer.java
+++ b/clients/src/main/java/org/apache/kafka/clients/producer/KafkaProducer.java
@@ -264,33 +264,33 @@ public class KafkaProducer<K,V> implements Producer<K,V> {
  * sending the record.
  * <p>
  * If you want to simulate a simple blocking call you can do the following:
- * 
- * <pre>
- *   producer.send(new ProducerRecord("the-topic", "key", "value")).get();
- * </pre>
+ *
+ * <pre>{@code
+ * producer.send(new ProducerRecord<byte[],byte[]>("the-topic", "key".getBytes(), "value".getBytes())).get();
+ * }</pre>
  * <p>
  * Those desiring fully non-blocking usage can make use of the {@link Callback} parameter to provide a callback that
  * will be invoked when the request is complete.
- * 
- * <pre>
- *   ProducerRecord record = new ProducerRecord("the-topic", "key", "value");
+ *
+ * <pre>{@code
+ * ProducerRecord<byte[],byte[]> record = new ProducerRecord<byte[],byte[]>("the-topic", "key".getBytes(), "value".getBytes());
  *   producer.send(myRecord,
- *               new Callback() {
+ *                new Callback() {
  *                   public void onCompletion(RecordMetadata metadata, Exception e) {
  *                       if(e != null)
  *                           e.printStackTrace();
  *                       System.out.println("The offset of the record we just sent is: " + metadata.offset());
  *                   }
- *               });
- * </pre>
- * 
+ *                });
+ * }</pre>
+ *
  * Callbacks for records being sent to the same partition are guaranteed to execute in order. That is, in the
  * following example <code>callback1</code> is guaranteed to execute before <code>callback2</code>:
- * 
- * <pre>
- * producer.send(new ProducerRecord(topic, partition, key, value), callback1);
- * producer.send(new ProducerRecord(topic, partition, key2, value2), callback2);
- * </pre>
+ *
+ * <pre>{@code
+ * producer.send(new ProducerRecord<byte[],byte[]>(topic, partition, key1, value1), callback1);
+ * producer.send(new ProducerRecord<byte[],byte[]>(topic, partition, key2, value2), callback2);
+ * }</pre>
  * <p>
  * Note that callbacks will generally execute in the I/O thread of the producer and so should be reasonably fast or
  * they will delay the sending of messages from other threads. If you want to execute blocking or computationally
@@ -329,7 +329,7 @@ public class KafkaProducer<K,V> implements Producer<K,V> {
                 " to class " + producerConfig.getClass(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG).getName() +
                 " specified in value.serializer");
         }
-        ProducerRecord serializedRecord = new ProducerRecord<byte[], byte[]>(record.topic(), record.partition(), serializedKey, serializedValue);
+        ProducerRecord<byte[], byte[]> serializedRecord = new ProducerRecord<byte[], byte[]>(record.topic(), record.partition(), serializedKey, serializedValue);
         int partition = partitioner.partition(serializedRecord, metadata.fetch());
         int serializedSize = Records.LOG_OVERHEAD + Record.recordSize(serializedKey, serializedValue);
         ensureValidRecordSize(serializedSize);
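Put together, the corrected javadoc corresponds to this self-contained program (the broker address and topic are placeholders; the serializer settings are the standard 0.8.2 new-producer configuration):

import java.util.Properties;
import org.apache.kafka.clients.producer.Callback;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;

public class ProducerExample {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092"); // placeholder broker list
        props.put("key.serializer", "org.apache.kafka.common.serialization.ByteArraySerializer");
        props.put("value.serializer", "org.apache.kafka.common.serialization.ByteArraySerializer");
        KafkaProducer<byte[], byte[]> producer = new KafkaProducer<byte[], byte[]>(props);

        // Blocking send: get() waits for the broker acknowledgement.
        producer.send(new ProducerRecord<byte[], byte[]>("the-topic", "key".getBytes(), "value".getBytes())).get();

        // Non-blocking send: the callback runs in the producer's I/O thread,
        // so keep it fast to avoid delaying other sends.
        producer.send(new ProducerRecord<byte[], byte[]>("the-topic", "key2".getBytes(), "value2".getBytes()),
            new Callback() {
                public void onCompletion(RecordMetadata metadata, Exception e) {
                    if (e != null)
                        e.printStackTrace();
                    else
                        System.out.println("The offset of the record we just sent is: " + metadata.offset());
                }
            });
        producer.close();
    }
}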


kafka git commit: kafka-1870; Cannot commit with simpleConsumer on Zookeeper only with Java API; patched by Jun Rao; reviewed by Joel Koshy and Sriharsha Chintalapani

2015-01-16 Thread junrao
Repository: kafka
Updated Branches:
  refs/heads/0.8.2 988e695fa -> 1d3fd0f6c


kafka-1870; Cannot commit with simpleConsumer on Zookeeper only with Java API; patched by Jun Rao; reviewed by Joel Koshy and Sriharsha Chintalapani


Project: http://git-wip-us.apache.org/repos/asf/kafka/repo
Commit: http://git-wip-us.apache.org/repos/asf/kafka/commit/1d3fd0f6
Tree: http://git-wip-us.apache.org/repos/asf/kafka/tree/1d3fd0f6
Diff: http://git-wip-us.apache.org/repos/asf/kafka/diff/1d3fd0f6

Branch: refs/heads/0.8.2
Commit: 1d3fd0f6c875e8162682679fa9299c3dd71a25ed
Parents: 988e695
Author: Jun Rao jun...@gmail.com
Authored: Fri Jan 16 18:25:51 2015 -0800
Committer: Jun Rao jun...@gmail.com
Committed: Fri Jan 16 18:25:51 2015 -0800

--
 core/src/main/scala/kafka/consumer/SimpleConsumer.scala | 2 ++
 core/src/main/scala/kafka/javaapi/OffsetCommitRequest.scala | 1 +
 core/src/main/scala/kafka/javaapi/OffsetFetchRequest.scala  | 2 +-
 core/src/main/scala/kafka/javaapi/consumer/SimpleConsumer.scala | 4 ++--
 4 files changed, 6 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/kafka/blob/1d3fd0f6/core/src/main/scala/kafka/consumer/SimpleConsumer.scala
--
diff --git a/core/src/main/scala/kafka/consumer/SimpleConsumer.scala 
b/core/src/main/scala/kafka/consumer/SimpleConsumer.scala
index e53ee51..b7d6656 100644
--- a/core/src/main/scala/kafka/consumer/SimpleConsumer.scala
+++ b/core/src/main/scala/kafka/consumer/SimpleConsumer.scala
@@ -128,6 +128,7 @@ class SimpleConsumer(val host: String,
 
   /**
* Commit offsets for a topic
+   * Version 0 of the request will commit offsets to Zookeeper and version 1 
and above will commit offsets to Kafka.
* @param request a [[kafka.api.OffsetCommitRequest]] object.
* @return a [[kafka.api.OffsetCommitResponse]] object.
*/
@@ -139,6 +140,7 @@ class SimpleConsumer(val host: String,
 
   /**
* Fetch offsets for a topic
   * Version 0 of the request will fetch offsets from Zookeeper and version 1 and above will fetch offsets from Kafka.
* @param request a [[kafka.api.OffsetFetchRequest]] object.
* @return a [[kafka.api.OffsetFetchResponse]] object.
*/

http://git-wip-us.apache.org/repos/asf/kafka/blob/1d3fd0f6/core/src/main/scala/kafka/javaapi/OffsetCommitRequest.scala
--
diff --git a/core/src/main/scala/kafka/javaapi/OffsetCommitRequest.scala 
b/core/src/main/scala/kafka/javaapi/OffsetCommitRequest.scala
index 27fc1eb..873f575 100644
--- a/core/src/main/scala/kafka/javaapi/OffsetCommitRequest.scala
+++ b/core/src/main/scala/kafka/javaapi/OffsetCommitRequest.scala
@@ -32,6 +32,7 @@ class OffsetCommitRequest(groupId: String,
 kafka.api.OffsetCommitRequest(
   groupId = groupId,
   requestInfo = scalaMap,
+  versionId = 0, // binds to version 0 so that it commits to Zookeeper
   correlationId = correlationId,
   clientId = clientId
 )

http://git-wip-us.apache.org/repos/asf/kafka/blob/1d3fd0f6/core/src/main/scala/kafka/javaapi/OffsetFetchRequest.scala
--
diff --git a/core/src/main/scala/kafka/javaapi/OffsetFetchRequest.scala 
b/core/src/main/scala/kafka/javaapi/OffsetFetchRequest.scala
index 5b4f4bb..1c25aa3 100644
--- a/core/src/main/scala/kafka/javaapi/OffsetFetchRequest.scala
+++ b/core/src/main/scala/kafka/javaapi/OffsetFetchRequest.scala
@@ -36,7 +36,7 @@ class OffsetFetchRequest(groupId: String,
 kafka.api.OffsetFetchRequest(
   groupId = groupId,
   requestInfo = scalaSeq,
-  versionId = versionId,
+  versionId = 0, // binds to version 0 so that it commits to Zookeeper
   correlationId = correlationId,
   clientId = clientId
 )

http://git-wip-us.apache.org/repos/asf/kafka/blob/1d3fd0f6/core/src/main/scala/kafka/javaapi/consumer/SimpleConsumer.scala
--
diff --git a/core/src/main/scala/kafka/javaapi/consumer/SimpleConsumer.scala 
b/core/src/main/scala/kafka/javaapi/consumer/SimpleConsumer.scala
index 0ab0195..abf6069 100644
--- a/core/src/main/scala/kafka/javaapi/consumer/SimpleConsumer.scala
+++ b/core/src/main/scala/kafka/javaapi/consumer/SimpleConsumer.scala
@@ -80,7 +80,7 @@ class SimpleConsumer(val host: String,
   }
 
   /**
-   * Commit offsets for a topic
+   * Commit offsets for a topic to Zookeeper
* @param request a [[kafka.javaapi.OffsetCommitRequest]] object.
* @return a [[kafka.javaapi.OffsetCommitResponse]] object.
*/
@@ -90,7 +90,7 @@ class SimpleConsumer(val host: String,
   }
 
   /**
-   * Fetch offsets for a topic
+   * Fetch offsets for a topic from Zookeeper
* @param request a 
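A sketch of what this change means for callers: committing through kafka.javaapi now always targets ZooKeeper. The host, port, group, and topic below are placeholders, and the constructor shapes are assumed from the 0.8.2 javaapi; check the javadoc above before relying on the exact signatures:

import java.util.HashMap;
import java.util.Map;
import kafka.common.OffsetAndMetadata;
import kafka.common.TopicAndPartition;
import kafka.javaapi.OffsetCommitRequest;
import kafka.javaapi.OffsetCommitResponse;
import kafka.javaapi.consumer.SimpleConsumer;

public class ZkOffsetCommitExample {
    public static void main(String[] args) {
        SimpleConsumer consumer = new SimpleConsumer("localhost", 9092, 100000, 64 * 1024, "offset-client");

        Map<TopicAndPartition, OffsetAndMetadata> offsets = new HashMap<TopicAndPartition, OffsetAndMetadata>();
        offsets.put(new TopicAndPartition("the-topic", 0),
                    new OffsetAndMetadata(42L, "no metadata", System.currentTimeMillis()));

        // After this patch the javaapi request is pinned to wire version 0,
        // so the offsets are stored in ZooKeeper regardless of broker settings.
        OffsetCommitRequest request = new OffsetCommitRequest("my-group", offsets, 0 /* correlationId */, "offset-client");
        OffsetCommitResponse response = consumer.commitOffsets(request);
        System.out.println("commit had error: " + response.hasError());
        consumer.close();
    }
}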

kafka git commit: kafka-1870; Cannot commit with simpleConsumer on Zookeeper only with Java API; patched by Jun Rao; reviewed by Joel Koshy and Sriharsha Chintalapani

2015-01-16 Thread junrao
Repository: kafka
Updated Branches:
  refs/heads/trunk 602d572f6 -> 1769642bb


kafka-1870; Cannot commit with simpleConsumer on Zookeeper only with Java API; patched by Jun Rao; reviewed by Joel Koshy and Sriharsha Chintalapani


Project: http://git-wip-us.apache.org/repos/asf/kafka/repo
Commit: http://git-wip-us.apache.org/repos/asf/kafka/commit/1769642b
Tree: http://git-wip-us.apache.org/repos/asf/kafka/tree/1769642b
Diff: http://git-wip-us.apache.org/repos/asf/kafka/diff/1769642b

Branch: refs/heads/trunk
Commit: 1769642bb779921267bd57d3d338591dbdf33842
Parents: 602d572
Author: Jun Rao jun...@gmail.com
Authored: Fri Jan 16 18:34:39 2015 -0800
Committer: Jun Rao jun...@gmail.com
Committed: Fri Jan 16 18:34:39 2015 -0800

--
 core/src/main/scala/kafka/consumer/SimpleConsumer.scala | 2 ++
 core/src/main/scala/kafka/javaapi/OffsetCommitRequest.scala | 1 +
 core/src/main/scala/kafka/javaapi/OffsetFetchRequest.scala  | 2 +-
 core/src/main/scala/kafka/javaapi/consumer/SimpleConsumer.scala | 4 ++--
 4 files changed, 6 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/kafka/blob/1769642b/core/src/main/scala/kafka/consumer/SimpleConsumer.scala
--
diff --git a/core/src/main/scala/kafka/consumer/SimpleConsumer.scala 
b/core/src/main/scala/kafka/consumer/SimpleConsumer.scala
index e53ee51..cbef84a 100644
--- a/core/src/main/scala/kafka/consumer/SimpleConsumer.scala
+++ b/core/src/main/scala/kafka/consumer/SimpleConsumer.scala
@@ -128,6 +128,7 @@ class SimpleConsumer(val host: String,
 
   /**
* Commit offsets for a topic
+   * Version 0 of the request will commit offsets to Zookeeper and version 1 
and above will commit offsets to Kafka.
* @param request a [[kafka.api.OffsetCommitRequest]] object.
* @return a [[kafka.api.OffsetCommitResponse]] object.
*/
@@ -139,6 +140,7 @@ class SimpleConsumer(val host: String,
 
   /**
* Fetch offsets for a topic
+   * Version 0 of the request will fetch offsets from Zookeeper and version 1 
and above will fetch offsets from Kafka.
* @param request a [[kafka.api.OffsetFetchRequest]] object.
* @return a [[kafka.api.OffsetFetchResponse]] object.
*/

http://git-wip-us.apache.org/repos/asf/kafka/blob/1769642b/core/src/main/scala/kafka/javaapi/OffsetCommitRequest.scala
--
diff --git a/core/src/main/scala/kafka/javaapi/OffsetCommitRequest.scala 
b/core/src/main/scala/kafka/javaapi/OffsetCommitRequest.scala
index 27fc1eb..873f575 100644
--- a/core/src/main/scala/kafka/javaapi/OffsetCommitRequest.scala
+++ b/core/src/main/scala/kafka/javaapi/OffsetCommitRequest.scala
@@ -32,6 +32,7 @@ class OffsetCommitRequest(groupId: String,
 kafka.api.OffsetCommitRequest(
   groupId = groupId,
   requestInfo = scalaMap,
+  versionId = 0, // binds to version 0 so that it commits to Zookeeper
   correlationId = correlationId,
   clientId = clientId
 )

http://git-wip-us.apache.org/repos/asf/kafka/blob/1769642b/core/src/main/scala/kafka/javaapi/OffsetFetchRequest.scala
--
diff --git a/core/src/main/scala/kafka/javaapi/OffsetFetchRequest.scala 
b/core/src/main/scala/kafka/javaapi/OffsetFetchRequest.scala
index 5b4f4bb..1c25aa3 100644
--- a/core/src/main/scala/kafka/javaapi/OffsetFetchRequest.scala
+++ b/core/src/main/scala/kafka/javaapi/OffsetFetchRequest.scala
@@ -36,7 +36,7 @@ class OffsetFetchRequest(groupId: String,
 kafka.api.OffsetFetchRequest(
   groupId = groupId,
   requestInfo = scalaSeq,
-  versionId = versionId,
+  versionId = 0, // binds to version 0 so that it commits to Zookeeper
   correlationId = correlationId,
   clientId = clientId
 )

http://git-wip-us.apache.org/repos/asf/kafka/blob/1769642b/core/src/main/scala/kafka/javaapi/consumer/SimpleConsumer.scala
--
diff --git a/core/src/main/scala/kafka/javaapi/consumer/SimpleConsumer.scala 
b/core/src/main/scala/kafka/javaapi/consumer/SimpleConsumer.scala
index 0ab0195..abf6069 100644
--- a/core/src/main/scala/kafka/javaapi/consumer/SimpleConsumer.scala
+++ b/core/src/main/scala/kafka/javaapi/consumer/SimpleConsumer.scala
@@ -80,7 +80,7 @@ class SimpleConsumer(val host: String,
   }
 
   /**
-   * Commit offsets for a topic
+   * Commit offsets for a topic to Zookeeper
* @param request a [[kafka.javaapi.OffsetCommitRequest]] object.
* @return a [[kafka.javaapi.OffsetCommitResponse]] object.
*/
@@ -90,7 +90,7 @@ class SimpleConsumer(val host: String,
   }
 
   /**
-   * Fetch offsets for a topic
+   * Fetch offsets for a topic from Zookeeper
* @param request a [[kafka.javaapi.OffsetFetchRequest]] 

kafka git commit: kafka-1864; Revisit defaults for the internal offsets topic; patched by Jun Rao; reviewed by Joel Koshy, Neha Narkhede, and Gwen Shapira

2015-01-16 Thread junrao
Repository: kafka
Updated Branches:
  refs/heads/0.8.2 1d3fd0f6c -> 7a313999d


kafka-1864; Revisit defaults for the internal offsets topic; patched by Jun Rao; reviewed by Joel Koshy, Neha Narkhede, and Gwen Shapira


Project: http://git-wip-us.apache.org/repos/asf/kafka/repo
Commit: http://git-wip-us.apache.org/repos/asf/kafka/commit/7a313999
Tree: http://git-wip-us.apache.org/repos/asf/kafka/tree/7a313999
Diff: http://git-wip-us.apache.org/repos/asf/kafka/diff/7a313999

Branch: refs/heads/0.8.2
Commit: 7a313999d0d2c846541299ded77b25eddf1bf554
Parents: 1d3fd0f
Author: Jun Rao jun...@gmail.com
Authored: Fri Jan 16 18:48:33 2015 -0800
Committer: Jun Rao jun...@gmail.com
Committed: Fri Jan 16 18:48:33 2015 -0800

--
 core/src/main/scala/kafka/server/KafkaApis.scala | 11 +--
 core/src/main/scala/kafka/server/KafkaConfig.scala   |  6 +-
 core/src/main/scala/kafka/server/OffsetManager.scala |  4 ++--
 .../kafka/api/ProducerFailureHandlingTest.scala  |  7 +++
 .../test/scala/unit/kafka/server/OffsetCommitTest.scala  |  7 +++
 5 files changed, 30 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/kafka/blob/7a313999/core/src/main/scala/kafka/server/KafkaApis.scala
--
diff --git a/core/src/main/scala/kafka/server/KafkaApis.scala 
b/core/src/main/scala/kafka/server/KafkaApis.scala
index d626b17..7def852 100644
--- a/core/src/main/scala/kafka/server/KafkaApis.scala
+++ b/core/src/main/scala/kafka/server/KafkaApis.scala
@@ -498,10 +498,17 @@ class KafkaApis(val requestChannel: RequestChannel,
         if (topic == OffsetManager.OffsetsTopicName || config.autoCreateTopicsEnable) {
           try {
             if (topic == OffsetManager.OffsetsTopicName) {
-              AdminUtils.createTopic(zkClient, topic, config.offsetsTopicPartitions, config.offsetsTopicReplicationFactor,
+              val aliveBrokers = metadataCache.getAliveBrokers
+              val offsetsTopicReplicationFactor =
+                if (aliveBrokers.length > 0)
+                  Math.min(config.offsetsTopicReplicationFactor, aliveBrokers.length)
+                else
+                  config.offsetsTopicReplicationFactor
+              AdminUtils.createTopic(zkClient, topic, config.offsetsTopicPartitions,
+                                     offsetsTopicReplicationFactor,
                                      offsetManager.offsetsTopicConfig)
               info("Auto creation of topic %s with %d partitions and replication factor %d is successful!"
-                .format(topic, config.offsetsTopicPartitions, config.offsetsTopicReplicationFactor))
+                .format(topic, config.offsetsTopicPartitions, offsetsTopicReplicationFactor))
             }
             else {
               AdminUtils.createTopic(zkClient, topic, config.numPartitions, config.defaultReplicationFactor)

http://git-wip-us.apache.org/repos/asf/kafka/blob/7a313999/core/src/main/scala/kafka/server/KafkaConfig.scala
--
diff --git a/core/src/main/scala/kafka/server/KafkaConfig.scala 
b/core/src/main/scala/kafka/server/KafkaConfig.scala
index 6e26c54..e3396ad 100644
--- a/core/src/main/scala/kafka/server/KafkaConfig.scala
+++ b/core/src/main/scala/kafka/server/KafkaConfig.scala
@@ -304,7 +304,11 @@ class KafkaConfig private (val props: VerifiableProperties) extends ZKConfig(pro
   val offsetsLoadBufferSize = props.getIntInRange("offsets.load.buffer.size",
     OffsetManagerConfig.DefaultLoadBufferSize, (1, Integer.MAX_VALUE))
 
-  /** The replication factor for the offset commit topic (set higher to ensure availability). */
+  /** The replication factor for the offsets topic (set higher to ensure availability). To
+    * ensure that the effective replication factor of the offsets topic is the configured value,
+    * the number of alive brokers has to be at least the replication factor at the time of the
+    * first request for the offsets topic. If not, either the offsets topic creation will fail or
+    * it will get a replication factor of min(alive brokers, configured replication factor) */
   val offsetsTopicReplicationFactor: Short = props.getShortInRange("offsets.topic.replication.factor",
     OffsetManagerConfig.DefaultOffsetsTopicReplicationFactor, (1, Short.MaxValue))
 
 

http://git-wip-us.apache.org/repos/asf/kafka/blob/7a313999/core/src/main/scala/kafka/server/OffsetManager.scala
--
diff --git a/core/src/main/scala/kafka/server/OffsetManager.scala 
b/core/src/main/scala/kafka/server/OffsetManager.scala
index 43eb2a3..d3e8868 100644
--- a/core/src/main/scala/kafka/server/OffsetManager.scala
+++ 

kafka git commit: kafka-1864; Revisit defaults for the internal offsets topic; patched by Jun Rao; reviewed by Joel Koshy, Neha Narkhede, and Gwen Shapira

2015-01-16 Thread junrao
Repository: kafka
Updated Branches:
  refs/heads/trunk 1769642bb -> 5174df537


kafka-1864; Revisit defaults for the internal offsets topic; patched by Jun Rao; reviewed by Joel Koshy, Neha Narkhede, and Gwen Shapira


Project: http://git-wip-us.apache.org/repos/asf/kafka/repo
Commit: http://git-wip-us.apache.org/repos/asf/kafka/commit/5174df53
Tree: http://git-wip-us.apache.org/repos/asf/kafka/tree/5174df53
Diff: http://git-wip-us.apache.org/repos/asf/kafka/diff/5174df53

Branch: refs/heads/trunk
Commit: 5174df53778cb5cb2d6d86e4cec9f3185a2c85db
Parents: 1769642
Author: Jun Rao jun...@gmail.com
Authored: Fri Jan 16 18:56:32 2015 -0800
Committer: Jun Rao jun...@gmail.com
Committed: Fri Jan 16 18:56:32 2015 -0800

--
 core/src/main/scala/kafka/server/KafkaApis.scala | 11 +--
 core/src/main/scala/kafka/server/KafkaConfig.scala   |  6 +-
 core/src/main/scala/kafka/server/OffsetManager.scala |  4 ++--
 .../kafka/api/ProducerFailureHandlingTest.scala  |  7 +++
 .../test/scala/unit/kafka/server/OffsetCommitTest.scala  |  7 +++
 5 files changed, 30 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/kafka/blob/5174df53/core/src/main/scala/kafka/server/KafkaApis.scala
--
diff --git a/core/src/main/scala/kafka/server/KafkaApis.scala 
b/core/src/main/scala/kafka/server/KafkaApis.scala
index c011a1b..ec8d9f7 100644
--- a/core/src/main/scala/kafka/server/KafkaApis.scala
+++ b/core/src/main/scala/kafka/server/KafkaApis.scala
@@ -351,10 +351,17 @@ class KafkaApis(val requestChannel: RequestChannel,
         if (topic == OffsetManager.OffsetsTopicName || config.autoCreateTopicsEnable) {
           try {
             if (topic == OffsetManager.OffsetsTopicName) {
-              AdminUtils.createTopic(zkClient, topic, config.offsetsTopicPartitions, config.offsetsTopicReplicationFactor,
+              val aliveBrokers = metadataCache.getAliveBrokers
+              val offsetsTopicReplicationFactor =
+                if (aliveBrokers.length > 0)
+                  Math.min(config.offsetsTopicReplicationFactor, aliveBrokers.length)
+                else
+                  config.offsetsTopicReplicationFactor
+              AdminUtils.createTopic(zkClient, topic, config.offsetsTopicPartitions,
+                                     offsetsTopicReplicationFactor,
                                      offsetManager.offsetsTopicConfig)
               info("Auto creation of topic %s with %d partitions and replication factor %d is successful!"
-                .format(topic, config.offsetsTopicPartitions, config.offsetsTopicReplicationFactor))
+                .format(topic, config.offsetsTopicPartitions, offsetsTopicReplicationFactor))
             }
             else {
               AdminUtils.createTopic(zkClient, topic, config.numPartitions, config.defaultReplicationFactor)

http://git-wip-us.apache.org/repos/asf/kafka/blob/5174df53/core/src/main/scala/kafka/server/KafkaConfig.scala
--
diff --git a/core/src/main/scala/kafka/server/KafkaConfig.scala 
b/core/src/main/scala/kafka/server/KafkaConfig.scala
index d3d8ac4..88689df 100644
--- a/core/src/main/scala/kafka/server/KafkaConfig.scala
+++ b/core/src/main/scala/kafka/server/KafkaConfig.scala
@@ -312,7 +312,11 @@ class KafkaConfig private (val props: VerifiableProperties) extends ZKConfig(pro
   val offsetsLoadBufferSize = props.getIntInRange("offsets.load.buffer.size",
     OffsetManagerConfig.DefaultLoadBufferSize, (1, Integer.MAX_VALUE))
 
-  /** The replication factor for the offset commit topic (set higher to ensure availability). */
+  /** The replication factor for the offsets topic (set higher to ensure availability). To
+    * ensure that the effective replication factor of the offsets topic is the configured value,
+    * the number of alive brokers has to be at least the replication factor at the time of the
+    * first request for the offsets topic. If not, either the offsets topic creation will fail or
+    * it will get a replication factor of min(alive brokers, configured replication factor) */
   val offsetsTopicReplicationFactor: Short = props.getShortInRange("offsets.topic.replication.factor",
     OffsetManagerConfig.DefaultOffsetsTopicReplicationFactor, (1, Short.MaxValue))
 
 

http://git-wip-us.apache.org/repos/asf/kafka/blob/5174df53/core/src/main/scala/kafka/server/OffsetManager.scala
--
diff --git a/core/src/main/scala/kafka/server/OffsetManager.scala 
b/core/src/main/scala/kafka/server/OffsetManager.scala
index 3c79428..0bdd42f 100644
--- a/core/src/main/scala/kafka/server/OffsetManager.scala
+++ 
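Both hunks of this change reduce to a single rule; a hypothetical Java helper (the method name is ours, not Kafka's) states it explicitly:

public class OffsetsTopicDefaults {
    // Mirrors the KAFKA-1864 logic: cap the offsets-topic replication factor at
    // the number of currently alive brokers, falling back to the configured
    // value when no broker metadata is available yet.
    static short effectiveReplicationFactor(short configured, int aliveBrokers) {
        return aliveBrokers > 0 ? (short) Math.min(configured, aliveBrokers) : configured;
    }

    public static void main(String[] args) {
        // Configured factor 3 but only one broker alive at first use -> 1.
        System.out.println(effectiveReplicationFactor((short) 3, 1));
    }
}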

kafka git commit: trivial change to remove -XX:+UseCompressedOops option from script

2015-01-16 Thread junrao
Repository: kafka
Updated Branches:
  refs/heads/trunk 5174df537 -> 09234f0e5


trivial change to remove -XX:+UseCompressedOops option from script


Project: http://git-wip-us.apache.org/repos/asf/kafka/repo
Commit: http://git-wip-us.apache.org/repos/asf/kafka/commit/09234f0e
Tree: http://git-wip-us.apache.org/repos/asf/kafka/tree/09234f0e
Diff: http://git-wip-us.apache.org/repos/asf/kafka/diff/09234f0e

Branch: refs/heads/trunk
Commit: 09234f0e5206b8cb55e89cffc4a0ecfbaafc7f86
Parents: 5174df5
Author: Jun Rao jun...@gmail.com
Authored: Fri Jan 16 19:04:11 2015 -0800
Committer: Jun Rao jun...@gmail.com
Committed: Fri Jan 16 19:04:11 2015 -0800

--
 bin/kafka-run-class.sh | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/kafka/blob/09234f0e/bin/kafka-run-class.sh
--
diff --git a/bin/kafka-run-class.sh b/bin/kafka-run-class.sh
index ce3a4d0..22a9865 100755
--- a/bin/kafka-run-class.sh
+++ b/bin/kafka-run-class.sh
@@ -112,7 +112,7 @@ fi
 
 # JVM performance options
 if [ -z "$KAFKA_JVM_PERFORMANCE_OPTS" ]; then
-  KAFKA_JVM_PERFORMANCE_OPTS="-server -XX:+UseCompressedOops -XX:+UseParNewGC -XX:+UseConcMarkSweepGC -XX:+CMSClassUnloadingEnabled -XX:+CMSScavengeBeforeRemark -XX:+DisableExplicitGC -Djava.awt.headless=true"
+  KAFKA_JVM_PERFORMANCE_OPTS="-server -XX:+UseParNewGC -XX:+UseConcMarkSweepGC -XX:+CMSClassUnloadingEnabled -XX:+CMSScavengeBeforeRemark -XX:+DisableExplicitGC -Djava.awt.headless=true"
 fi
 
 



kafka git commit: Revert kafka-1711; WARN Property topic is not valid when running console producer; patched by Joe Crobak; reviewed by Jun Rao

2015-01-19 Thread junrao
Repository: kafka
Updated Branches:
  refs/heads/0.8.2 059b09dec -> 0b312a6b9


Revert kafka-1711; WARN Property topic is not valid when running console 
producer; patched by Joe Crobak; reviewed by Jun Rao


Project: http://git-wip-us.apache.org/repos/asf/kafka/repo
Commit: http://git-wip-us.apache.org/repos/asf/kafka/commit/0b312a6b
Tree: http://git-wip-us.apache.org/repos/asf/kafka/tree/0b312a6b
Diff: http://git-wip-us.apache.org/repos/asf/kafka/diff/0b312a6b

Branch: refs/heads/0.8.2
Commit: 0b312a6b9f0833d38eec434bfff4c647c1814564
Parents: 059b09d
Author: Jun Rao jun...@gmail.com
Authored: Mon Jan 19 15:53:53 2015 -0800
Committer: Jun Rao jun...@gmail.com
Committed: Mon Jan 19 15:53:53 2015 -0800

--
 core/src/main/scala/kafka/tools/ConsoleProducer.scala | 6 ++
 1 file changed, 2 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/kafka/blob/0b312a6b/core/src/main/scala/kafka/tools/ConsoleProducer.scala
--
diff --git a/core/src/main/scala/kafka/tools/ConsoleProducer.scala 
b/core/src/main/scala/kafka/tools/ConsoleProducer.scala
index 2a8e981..a680b62 100644
--- a/core/src/main/scala/kafka/tools/ConsoleProducer.scala
+++ b/core/src/main/scala/kafka/tools/ConsoleProducer.scala
@@ -35,11 +35,9 @@ object ConsoleProducer {
     val config = new ProducerConfig(args)
     val reader = Class.forName(config.readerClass).newInstance().asInstanceOf[MessageReader]
     val props = new Properties
+    props.put("topic", config.topic)
     props.putAll(config.cmdLineProps)
-
-    val readerProps = new Properties(props)
-    readerProps.put("topic", config.topic)
-    reader.init(System.in, readerProps)
+    reader.init(System.in, props)
 
     try {
         val producer =
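Context for the revert: the reverted patch routed the topic through a java.util.Properties defaults object. A small self-contained Java demonstration of why that is subtle (defaults are visible to getProperty but not to Hashtable-level views such as putAll):

import java.util.Properties;

public class PropertiesDefaultsDemo {
    public static void main(String[] args) {
        Properties defaults = new Properties();
        defaults.put("topic", "the-topic");

        // The defaults object is consulted only as a getProperty() fallback.
        Properties withDefaults = new Properties(defaults);
        System.out.println(withDefaults.getProperty("topic")); // the-topic

        // Copying with putAll() sees only the Hashtable entries, so the
        // defaulted key is silently dropped.
        Properties copy = new Properties();
        copy.putAll(withDefaults);
        System.out.println(copy.getProperty("topic")); // null
    }
}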



Git Push Summary

2015-01-19 Thread junrao
Repository: kafka
Updated Tags:  refs/tags/0.8.2.0 [deleted] b0c7d579f


Git Push Summary

2015-01-19 Thread junrao
Repository: kafka
Updated Tags:  refs/tags/0.8.2.0 [created] 058d58ade
  refs/tags/show [created] b0c7d579f


svn commit: r1653131 - /kafka/site/KEYS

2015-01-19 Thread junrao
Author: junrao
Date: Mon Jan 19 22:00:16 2015
New Revision: 1653131

URL: http://svn.apache.org/r1653131
Log:
move KEYS from top level to site since top level is readonly now

Added:
kafka/site/KEYS

Added: kafka/site/KEYS
URL: http://svn.apache.org/viewvc/kafka/site/KEYS?rev=1653131&view=auto
==============================================================================
--- kafka/site/KEYS (added)
+++ kafka/site/KEYS Mon Jan 19 22:00:16 2015
@@ -0,0 +1,294 @@
+This file contains the PGP keys of various developers.
+
+Users: pgp < KEYS
+   gpg --import KEYS
+Developers:
+pgp -kxa <your name> and append it to this file.
+(pgpk -ll <your name> && pgpk -xa <your name>) >> this file.
+(gpg --list-sigs <your name>
+  && gpg --armor --export <your name>) >> this file.
+
+pub   4096R/99369B56 2011-10-06
+uid                  Neha Narkhede (Key for signing code and releases) <nehanarkh...@apache.org>
+sig 3        99369B56 2011-10-06  Neha Narkhede (Key for signing code and releases) <nehanarkh...@apache.org>
+sub   4096R/A71D126A 2011-10-06
+sig          99369B56 2011-10-06  Neha Narkhede (Key for signing code and releases) <nehanarkh...@apache.org>
+
+-BEGIN PGP PUBLIC KEY BLOCK-
+Version: GnuPG/MacGPG2 v2.0.17 (Darwin)
+Comment: GPGTools - http://gpgtools.org
+
+mQENBEt9wioBCADh0bdDopK7wdLLt6YIEA3KWdXmRhhmY2PDikKZq5EQlwkAmdZF
+/CTcheArdAXXuxfPN8kZp4MJE01mrgyZA9S8tsYG1GarPTpYUDXxZJJgswSKNAbU
+j4sL1sYm89pzsm57Mjt+4ek9F+GJeEBOiog3/4oaaVeuzFT2LhLbD2PY5CS/5MdZ
+t4KaqPAHoAQQGwzGSzaXxXbGKlQUDm0W33jjyWyON/eHHLUHbFwlb9f+du8DJLQ6
+WQjFR/xSzSxztJFUF1JUBm6l9ZMMMeAojTEBKXeh9Bo7iWcVU/nJ+VebAE8IpgU4
+9TnNf/o2iJnKKTyKLW5QDR83gIFsml/6sja3ABEBAAGJAR8EIAEKAAkFAkyaTuQC
+HQEACgkQaCVaMFMDgyhOlAgAixphPIQp1z/5fuuiXLBTPzLLRsR3UQ9jeAowUdrA
+utnn7uVHvAqcUy/WLGJyRMVQl3y8oNAQXmNSfeLzp2Bs/uRuOf4B2b2XBX3F6AqX
+1UI7ASzyG1cowHNZ+Oq3Edg6YwQCt+5QwrlLCcp4eo3J0/NwxrRqYM5TdFVnvN3L
+PmHMPlOhLfZJRf2/g9dcWpIcLEYaGgJMD4uUogaN2CT2GXjntFsdgRG7jYasTK6+
+TF3ML/rB/tbNcMo23IiQ8GKaGO04uQUlyo2b3ix4uUZGtIIIFtZMOcgSLTKSavn/
+hPMe6wPOkB/Onno0zDUBCrpACxjcW9fyTALBlrJTCrYpz7QjQ2hyaXMgRG91Z2xh
+cyA8Y2RvdWdsYXNAYXBhY2hlLm9yZz6JATgEEwECACIFAkt9wioCGwMGCwkIBwMC
+BhUIAgkKCwQWAgMBAh4BAheAAAoJEGglWjBTA4MoN1QH+gJLOaIUhIqECvwz77js
+9HfadKmKqwc0SGDpoZm2tMeUsp8+26fhqEdprNcfKV530+M4e4l/ka4Y96p7C+EW
+kAuMQ41F9YnvxX9dATc+RgLueVkaUxMjNLPAIwQkUyQoeR/tLcMpNI/lbWdwEJRK
+unufbxDGIXVjnN8y9dkmlZ5NGsAWa2ZoYDrpKHFwTKvNv+J24rYuFbuBktlKk1L7
+f0N/4dCeFlHjoaVykS0BuQrEA8/ZPzE4qPw7FFWUb3M+gZH7xulSQnc+Q4oCVdYw
+FKsoB5iz924R4g09Yk9l8wXIkKvhCFwkioapZLqFQyse1hcsEUmBJzLBjjXWkw9+
+pXe5AQ0ES33CKgEIALqimXtgSq6zB21b2z4mVIAgSHFRQOa3q5jhWYvHALnwSIIc
+AvPHnPfWUuRyaGjJZbVgLdB0o+ooxYT8SdZtPVFMpVvoAaLH/4t1DJvQTXYUW+vE
+Z6cWeC5rr+JXcwLa8xhSYYhv3y7Lf3AwdKq6bzF5+7/rwDu7K5LhzS+hVrhln4Uq
+yFNQdnWoXIsBIt0wxut3KuRyNICjYs6Jr0zkFR7azCJCDml6+NnGX/jCtK5HE0AZ
+f11VOgds1kH1bohI0FkzRXk5PbMgvnxRZNlSGxzTUceYystJq/iIuzvSH3ixMKqL
+Se0KAOj1FrcpGsP+RF17sooIMjntg2MZKr7STTsAEQEAAYkBHwQYAQIACQUCS33C
+KgIbDAAKCRBoJVowUwODKP9GB/9TsH6lBFz/ueae0AD6gwGGRySkc+zilFYYBAbR
+jEpPpoyj5KCVYcQ1Q5SPeLzy7/4hQJlf0F0gg8w3G4Axjnaej4oDuNIGBKGKTLsl
+SaBI2P44wlVdDOaUhgAVB/zzCqUidLYVwoOmT3OLz01lZgPXKihw+I5JtNvdpOa+
+xXHQ7dwezmUMChHZADF198/2pRIamqwMD0MyApaMFeb5posM0KYHj+k6x8uMQB8m
+V6IdsgqZpglXO7gfuvz3mQqye50e0xIhCLdX6JwIOnLGzUPJW9ds33rWkzk9MW+h
+JnjuiZeYbtlLzCzuvRVjnSblrUUCaOm1RFVWrw6yVVTGHErmmQINBE6OPIgBEAC5
+XC3CGy1peReJPUk2J5iCvmdSIOEaiOfULaz+1WiKgBMovUAKCbIuCB9ZMjfT0gce
+Agqj4UeCeRhPi2dVxVl/r1LCyGMJ1hZyEGDk7No1QkCemnCD4yiBD+BX+9V1Zuqa
+r1goV5tMXoE0kq0cyoih/c6t8qrP5Wf/BTS118TBFAHRql/yYle+g3YEk0uZ4yue
+dCVQIrjTWe46fSAacF8eGluGQVTbNj2aRacfu+UYPj8G241F8gTOTjZ3fJ80KTOI
+tjiV8Gy+I6HDIWd7XMwNCO8z2G2lto0CBoDuaUukXgB5/CNFQL1S8Zxl2idxEQSt
+ZzKTKmZZcom+GNMvL8xCE8sMgMSiZ8Q54fRevTQrDLPpIhl0g6V1v7vOfokWxf7K
+6t+KZXB7oZbc1YrW/kcjDUAMlS4gAQ4K7ngEK3qIEVquvXx4nLVM++zn0/0PVzES
+2gMnacP9mdsVsPbJx+Jwk6hZi1mYBS7DyZkoVJYgXqLdCtWebH+VqdHiEHBrWYxH
+eoyHWjViJKZYh3ojcKuZMicptEzA25hz5HaKEkKv1PEYeKEDG8tLVf09dU+HHdK7
+Sbca08hXFhDIiDlksFa67Zy7MriJRTun8rTCD1DFH3wyuubRu8bDo7UobBgSK3td
+cBRKM8i3e3JQZejhKlROhJ56xIT3ivO3Coe0hTUDXQARAQABtEtOZWhhIE5hcmto
+ZWRlIChLZXkgZm9yIHNpZ25pbmcgY29kZSBhbmQgcmVsZWFzZXMpIDxuZWhhbmFy
+a2hlZGVAYXBhY2hlLm9yZz6JAjgEEwECACIFAk6OPIgCGwMGCwkIBwMCBhUIAgkK
+CwQWAgMBAh4BAheAAAoJEGx0AWyZNptWXSsP/i75oSJ50HJBRSni0oT6SxDG0Ybs
+Gj3Ojwn2FppOOpVW7kjSRPTb/gSTHYlxNvBxI+nkyKx4Z2+IloDaH2LUsxdAkBor
+9a58ykW9BD5Yd/5nRvduHp71XV28b3/xnN5H6kbCUr5yWZPVZ4//o8L12PS3Jy0i
+c9cQQF7vzuqQOvPpncruBChikEnSrwFmZI+UMRBBduCck6PNyeWTjb8ipfJtvwfO
+vQHX5+AOSz6zwICkzVfOC/nVgSgJtOW/5sF+aZRGylQNpduwl3hB/fqFGuPqPFQD
+NoVmCen4YQkzXcKF/cWum89GQgrH5sPlUqI0FFbeieSWV6fXeOjk6Jeyccd6VBFm
+3jSM6GVEwUqxe0pCBkmkL8QOWBmwhgVdb9e+Sonq6RTcbZlZEOu4tmAuLmdlYMry
+Vdw/zIe4qoLufbgKljo9467Kp+yWdXGp8kk55bLyqDnxzyUzYGwVcCiAthP4cWKN
+Aygibby4eyHe+D2GjlQNBMfl3lzcFD4SpJBHMgfF4/7eOv79/estNiAI8GXaPdVd
+bHFNwNalSsdrL4V6rsDuCmjAqpRe3blhYTg133bMAXMbvah0w4O/zvZKcD8tS+oy
+enMEezN/n1yl+33BL6VPbslnNx+rJ6ZXeuFq1QuXdwpcnUGL2hzJH7RVlGDcZpar
+goB5Qax+bx0f4uZhuQINBE6OPIgBEADnVeGW0oQWDB

kafka git commit: KAFKA-1879; Log warning when receiving produce requests with acks > 1; patched by Gwen Shapira; reviewed by Jun Rao

2015-01-19 Thread junrao
Repository: kafka
Updated Branches:
  refs/heads/0.8.2 1e640177f -> 059b09dec


KAFKA-1879; Log warning when receiving produce requests with acks > 1; patched by Gwen Shapira; reviewed by Jun Rao


Project: http://git-wip-us.apache.org/repos/asf/kafka/repo
Commit: http://git-wip-us.apache.org/repos/asf/kafka/commit/059b09de
Tree: http://git-wip-us.apache.org/repos/asf/kafka/tree/059b09de
Diff: http://git-wip-us.apache.org/repos/asf/kafka/diff/059b09de

Branch: refs/heads/0.8.2
Commit: 059b09decf52e774fe7aadc00ca3ef715cd4016c
Parents: 1e64017
Author: Gwen Shapira csh...@gmail.com
Authored: Mon Jan 19 14:07:51 2015 -0800
Committer: Jun Rao jun...@gmail.com
Committed: Mon Jan 19 14:07:51 2015 -0800

--
 core/src/main/scala/kafka/server/KafkaApis.scala | 6 ++
 1 file changed, 6 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/kafka/blob/059b09de/core/src/main/scala/kafka/server/KafkaApis.scala
--
diff --git a/core/src/main/scala/kafka/server/KafkaApis.scala 
b/core/src/main/scala/kafka/server/KafkaApis.scala
index 7def852..3d8eba6 100644
--- a/core/src/main/scala/kafka/server/KafkaApis.scala
+++ b/core/src/main/scala/kafka/server/KafkaApis.scala
@@ -194,6 +194,12 @@ class KafkaApis(val requestChannel: RequestChannel,
 (request.requestObj.asInstanceOf[ProducerRequest], None)
   }
 
+if (produceRequest.requiredAcks > 1 || produceRequest.requiredAcks < -1) {
+  warn(("Client %s from %s sent a produce request with request.required.acks of %d, which is now deprecated and will " +
+"be removed in next release. Valid values are -1, 0 or 1. Please consult Kafka documentation for supported " +
+"and recommended configuration.").format(produceRequest.clientId, request.remoteAddress, produceRequest.requiredAcks))
+}
+
 val sTime = SystemTime.milliseconds
 val localProduceResults = appendToLocalLog(produceRequest, 
offsetCommitRequestOpt.nonEmpty)
 debug("Produce to local log in %d ms".format(SystemTime.milliseconds - sTime))
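
For readers of this patch, a minimal hedged sketch of the range check it introduces (standalone Scala, not the broker code itself): only -1, 0 and 1 remain supported, anything outside that range now draws the warning.

    // Sketch of the validation above.
    def isDeprecatedAcks(requiredAcks: Short): Boolean =
      requiredAcks > 1 || requiredAcks < -1

    assert(isDeprecatedAcks(2.toShort))      // e.g. "wait for 2 replicas" is now deprecated
    assert(!isDeprecatedAcks((-1).toShort))  // -1 (all in-sync replicas) is still valid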



kafka git commit: KAFKA-1723; num.partitions documented default is 1 while actual default is 2; patched by Manikumar Reddy; reviewed by Jun Rao

2015-01-19 Thread junrao
Repository: kafka
Updated Branches:
  refs/heads/0.8.2 86a8bc2b0 -> 7013b2639


KAFKA-1723; num.partitions documented default is 1 while actual default is 2; 
patched by Manikumar Reddy; reviewed by Jun Rao


Project: http://git-wip-us.apache.org/repos/asf/kafka/repo
Commit: http://git-wip-us.apache.org/repos/asf/kafka/commit/7013b263
Tree: http://git-wip-us.apache.org/repos/asf/kafka/tree/7013b263
Diff: http://git-wip-us.apache.org/repos/asf/kafka/diff/7013b263

Branch: refs/heads/0.8.2
Commit: 7013b2639eda1ef3e7b2d794bf8513ba40ef34bf
Parents: 86a8bc2
Author: Manikumar Reddy manikumar.re...@gmail.com
Authored: Mon Jan 19 08:43:57 2015 -0800
Committer: Jun Rao jun...@gmail.com
Committed: Mon Jan 19 08:43:57 2015 -0800

--
 config/server.properties   | 4 ++--
 core/src/main/scala/kafka/server/KafkaConfig.scala | 2 +-
 2 files changed, 3 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/kafka/blob/7013b263/config/server.properties
--
diff --git a/config/server.properties b/config/server.properties
index b0e4496..1614260 100644
--- a/config/server.properties
+++ b/config/server.properties
@@ -46,7 +46,7 @@ num.io.threads=8
 socket.send.buffer.bytes=102400
 
 # The receive buffer (SO_RCVBUF) used by the socket server
-socket.receive.buffer.bytes=65536
+socket.receive.buffer.bytes=102400
 
 # The maximum size of a request that the socket server will accept (protection 
against OOM)
 socket.request.max.bytes=104857600
@@ -118,4 +118,4 @@ log.cleaner.enable=false
 zookeeper.connect=localhost:2181
 
 # Timeout in ms for connecting to zookeeper
-zookeeper.connection.timeout.ms=2000
+zookeeper.connection.timeout.ms=6000

http://git-wip-us.apache.org/repos/asf/kafka/blob/7013b263/core/src/main/scala/kafka/server/KafkaConfig.scala
--
diff --git a/core/src/main/scala/kafka/server/KafkaConfig.scala 
b/core/src/main/scala/kafka/server/KafkaConfig.scala
index e3396ad..e5fecae 100644
--- a/core/src/main/scala/kafka/server/KafkaConfig.scala
+++ b/core/src/main/scala/kafka/server/KafkaConfig.scala
@@ -92,7 +92,7 @@ class KafkaConfig private (val props: VerifiableProperties) 
extends ZKConfig(pro
   /*** Socket Server Configuration ***/
 
   /* the port to listen and accept connections on */
-  val port: Int = props.getInt("port", 6667)
+  val port: Int = props.getInt("port", 9092)
 
   /* hostname of broker. If this is set, it will only bind to this address. If 
this is not set,
* it will bind to all interfaces */
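
For illustration, a hedged sketch of what the new default means for an unset key (plain java.util.Properties standing in for Kafka's VerifiableProperties, which is an assumption of this sketch):

    import java.util.Properties

    // With no explicit "port" entry, the broker now falls back to 9092, not 6667.
    val props = new Properties()
    val port = Option(props.getProperty("port")).map(_.toInt).getOrElse(9092)
    assert(port == 9092)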



kafka git commit: KAFKA-1723; num.partitions documented default is 1 while actual default is 2; patched by Manikumar Reddy; reviewed by Jun Rao

2015-01-19 Thread junrao
Repository: kafka
Updated Branches:
  refs/heads/trunk 09234f0e5 -> 49d7f8ee1


KAFKA-1723; num.partitions documented default is 1 while actual default is 2; 
patched by Manikumar Reddy; reviewed by Jun Rao


Project: http://git-wip-us.apache.org/repos/asf/kafka/repo
Commit: http://git-wip-us.apache.org/repos/asf/kafka/commit/49d7f8ee
Tree: http://git-wip-us.apache.org/repos/asf/kafka/tree/49d7f8ee
Diff: http://git-wip-us.apache.org/repos/asf/kafka/diff/49d7f8ee

Branch: refs/heads/trunk
Commit: 49d7f8ee1cd96a99f44ac48043db9ad8a5336c7d
Parents: 09234f0
Author: Manikumar Reddy manikumar.re...@gmail.com
Authored: Mon Jan 19 08:44:53 2015 -0800
Committer: Jun Rao jun...@gmail.com
Committed: Mon Jan 19 08:44:53 2015 -0800

--
 config/server.properties   | 4 ++--
 core/src/main/scala/kafka/server/KafkaConfig.scala | 2 +-
 2 files changed, 3 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/kafka/blob/49d7f8ee/config/server.properties
--
diff --git a/config/server.properties b/config/server.properties
index b0e4496..1614260 100644
--- a/config/server.properties
+++ b/config/server.properties
@@ -46,7 +46,7 @@ num.io.threads=8
 socket.send.buffer.bytes=102400
 
 # The receive buffer (SO_RCVBUF) used by the socket server
-socket.receive.buffer.bytes=65536
+socket.receive.buffer.bytes=102400
 
 # The maximum size of a request that the socket server will accept (protection 
against OOM)
 socket.request.max.bytes=104857600
@@ -118,4 +118,4 @@ log.cleaner.enable=false
 zookeeper.connect=localhost:2181
 
 # Timeout in ms for connecting to zookeeper
-zookeeper.connection.timeout.ms=2000
+zookeeper.connection.timeout.ms=6000

http://git-wip-us.apache.org/repos/asf/kafka/blob/49d7f8ee/core/src/main/scala/kafka/server/KafkaConfig.scala
--
diff --git a/core/src/main/scala/kafka/server/KafkaConfig.scala 
b/core/src/main/scala/kafka/server/KafkaConfig.scala
index 88689df..6d74983 100644
--- a/core/src/main/scala/kafka/server/KafkaConfig.scala
+++ b/core/src/main/scala/kafka/server/KafkaConfig.scala
@@ -100,7 +100,7 @@ class KafkaConfig private (val props: VerifiableProperties) 
extends ZKConfig(pro
   /*** Socket Server Configuration ***/
 
   /* the port to listen and accept connections on */
-  val port: Int = props.getInt("port", 6667)
+  val port: Int = props.getInt("port", 9092)
 
   /* hostname of broker. If this is set, it will only bind to this address. If 
this is not set,
* it will bind to all interfaces */



kafka git commit: KAFKA-1876; pom file for scala 2.11 should reference a specific version; patched by Jun Rao; reviewed by Joe Stein and Sriharsha Chintalapani

2015-01-19 Thread junrao
Repository: kafka
Updated Branches:
  refs/heads/0.8.2 7013b2639 -> 1e640177f


KAFKA-1876; pom file for scala 2.11 should reference a specific version; 
patched by Jun Rao; reviewed by Joe Stein and Sriharsha Chintalapani


Project: http://git-wip-us.apache.org/repos/asf/kafka/repo
Commit: http://git-wip-us.apache.org/repos/asf/kafka/commit/1e640177
Tree: http://git-wip-us.apache.org/repos/asf/kafka/tree/1e640177
Diff: http://git-wip-us.apache.org/repos/asf/kafka/diff/1e640177

Branch: refs/heads/0.8.2
Commit: 1e640177f0fcdc9479a7dc9214d11c240f0786c2
Parents: 7013b26
Author: Jun Rao jun...@gmail.com
Authored: Mon Jan 19 11:10:50 2015 -0800
Committer: Jun Rao jun...@gmail.com
Committed: Mon Jan 19 11:10:50 2015 -0800

--
 README.md|  2 +-
 build.gradle | 14 +++---
 2 files changed, 8 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/kafka/blob/1e640177/README.md
--
diff --git a/README.md b/README.md
index 9aca906..c91efbe 100644
--- a/README.md
+++ b/README.md
@@ -49,7 +49,7 @@ The release file can be found inside 
./core/build/distributions/.
 ### Cleaning the build ###
 ./gradlew clean
 
-### Running a task on a particular version of Scala (either 2.9.1, 2.9.2, 
2.10.1 or 2.11) ###
+### Running a task on a particular version of Scala (either 2.9.1, 2.9.2, 
2.10.4 or 2.11.5) ###
  (If building a jar with a version other than 2.10, need to set 
SCALA_BINARY_VERSION variable or change it in bin/kafka-run-class.sh to run 
quick start.) 
 ./gradlew -PscalaVersion=2.9.1 jar
 ./gradlew -PscalaVersion=2.9.1 test

http://git-wip-us.apache.org/repos/asf/kafka/blob/1e640177/build.gradle
--
diff --git a/build.gradle b/build.gradle
index c9ac433..1cbab29 100644
--- a/build.gradle
+++ b/build.gradle
@@ -137,7 +137,7 @@ subprojects {
   }
 }
 
-for ( sv in ['2_9_1', '2_9_2', '2_10_4', '2_11'] ) {
+for ( sv in ['2_9_1', '2_9_2', '2_10_4', '2_11_5'] ) {
  String svInDot = sv.replaceAll( "_", ".")
 
  tasks.create(name: "jar_core_${sv}", type: GradleBuild) {
@@ -177,20 +177,20 @@ for ( sv in ['2_9_1', '2_9_2', '2_10_4', '2_11'] ) {
   }
 }
 
-tasks.create(name: jarAll, dependsOn: ['jar_core_2_9_1', 'jar_core_2_9_2', 
'jar_core_2_10_4', 'jar_core_2_11', 'clients:jar', 'examples:jar', 
'contrib:hadoop-consumer:jar', 'contrib:hadoop-producer:jar']) {
+tasks.create(name: jarAll, dependsOn: ['jar_core_2_9_1', 'jar_core_2_9_2', 
'jar_core_2_10_4', 'jar_core_2_11_5', 'clients:jar', 'examples:jar', 
'contrib:hadoop-consumer:jar', 'contrib:hadoop-producer:jar']) {
 }
 
-tasks.create(name: srcJarAll, dependsOn: ['srcJar_2_9_1', 'srcJar_2_9_2', 
'srcJar_2_10_4', 'srcJar_2_11', 'clients:srcJar', 'examples:srcJar', 
'contrib:hadoop-consumer:srcJar', 'contrib:hadoop-producer:srcJar']) { }
+tasks.create(name: srcJarAll, dependsOn: ['srcJar_2_9_1', 'srcJar_2_9_2', 
'srcJar_2_10_4', 'srcJar_2_11_5', 'clients:srcJar', 'examples:srcJar', 
'contrib:hadoop-consumer:srcJar', 'contrib:hadoop-producer:srcJar']) { }
 
-tasks.create(name: docsJarAll, dependsOn: ['docsJar_2_9_1', 'docsJar_2_9_2', 
'docsJar_2_10_4', 'docsJar_2_11', 'clients:docsJar', 'examples:docsJar', 
'contrib:hadoop-consumer:docsJar', 'contrib:hadoop-producer:docsJar']) { }
+tasks.create(name: docsJarAll, dependsOn: ['docsJar_2_9_1', 'docsJar_2_9_2', 
'docsJar_2_10_4', 'docsJar_2_11_5', 'clients:docsJar', 'examples:docsJar', 
'contrib:hadoop-consumer:docsJar', 'contrib:hadoop-producer:docsJar']) { }
 
-tasks.create(name: testAll, dependsOn: ['test_core_2_9_1', 
'test_core_2_9_2', 'test_core_2_10_4', 'test_core_2_11', 'clients:test']) {
+tasks.create(name: testAll, dependsOn: ['test_core_2_9_1', 
'test_core_2_9_2', 'test_core_2_10_4', 'test_core_2_11_5', 'clients:test']) {
 }
 
-tasks.create(name: releaseTarGzAll, dependsOn: ['releaseTarGz_2_9_1', 
'releaseTarGz_2_9_2', 'releaseTarGz_2_10_4', 'releaseTarGz_2_11']) {
+tasks.create(name: releaseTarGzAll, dependsOn: ['releaseTarGz_2_9_1', 
'releaseTarGz_2_9_2', 'releaseTarGz_2_10_4', 'releaseTarGz_2_11_5']) {
 }
 
-tasks.create(name: uploadArchivesAll, dependsOn: 
['uploadCoreArchives_2_9_1', 'uploadCoreArchives_2_9_2', 
'uploadCoreArchives_2_10_4', 'uploadCoreArchives_2_11', 
'clients:uploadArchives', 'examples:uploadArchives', 
'contrib:hadoop-consumer:uploadArchives', 
'contrib:hadoop-producer:uploadArchives']) {
+tasks.create(name: uploadArchivesAll, dependsOn: 
['uploadCoreArchives_2_9_1', 'uploadCoreArchives_2_9_2', 
'uploadCoreArchives_2_10_4', 'uploadCoreArchives_2_11_5', 
'clients:uploadArchives', 'examples:uploadArchives', 
'contrib:hadoop-consumer:uploadArchives', 
'contrib:hadoop-producer:uploadArchives']) {
 }
 
 project(':core') {
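
As an aside, the underscored task suffixes above map to dotted Scala versions via replaceAll; a small Scala sketch of the same transformation:

    // Mirrors svInDot = sv.replaceAll("_", ".") in build.gradle above.
    val suffixes = List("2_9_1", "2_9_2", "2_10_4", "2_11_5")
    val versions = suffixes.map(_.replaceAll("_", "."))
    assert(versions == List("2.9.1", "2.9.2", "2.10.4", "2.11.5"))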



kafka git commit: KAFKA-1876; pom file for scala 2.11 should reference a specific version; patched by Jun Rao; reviewed by Joe Stein and Sriharsha Chintalapani

2015-01-19 Thread junrao
Repository: kafka
Updated Branches:
  refs/heads/trunk 07cff7636 -> 5d648cfdb


KAFKA-1876; pom file for scala 2.11 should reference a specific version; 
patched by Jun Rao; reviewed by Joe Stein and Sriharsha Chintalapani


Project: http://git-wip-us.apache.org/repos/asf/kafka/repo
Commit: http://git-wip-us.apache.org/repos/asf/kafka/commit/5d648cfd
Tree: http://git-wip-us.apache.org/repos/asf/kafka/tree/5d648cfd
Diff: http://git-wip-us.apache.org/repos/asf/kafka/diff/5d648cfd

Branch: refs/heads/trunk
Commit: 5d648cfdb13dee078bb1b3415f77dbe2d47d7ec4
Parents: 07cff76
Author: Jun Rao jun...@gmail.com
Authored: Mon Jan 19 11:18:23 2015 -0800
Committer: Jun Rao jun...@gmail.com
Committed: Mon Jan 19 11:18:23 2015 -0800

--
 README.md|  2 +-
 build.gradle | 14 +++---
 2 files changed, 8 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/kafka/blob/5d648cfd/README.md
--
diff --git a/README.md b/README.md
index 11dfdf9..35e06b1 100644
--- a/README.md
+++ b/README.md
@@ -49,7 +49,7 @@ The release file can be found inside 
./core/build/distributions/.
 ### Cleaning the build ###
 ./gradlew clean
 
-### Running a task on a particular version of Scala (either 2.9.1, 2.9.2, 
2.10.1 or 2.11) ###
+### Running a task on a particular version of Scala (either 2.9.1, 2.9.2, 
2.10.4 or 2.11.5) ###
  (If building a jar with a version other than 2.10, need to set 
SCALA_BINARY_VERSION variable or change it in bin/kafka-run-class.sh to run 
quick start.) 
 ./gradlew -PscalaVersion=2.9.1 jar
 ./gradlew -PscalaVersion=2.9.1 test

http://git-wip-us.apache.org/repos/asf/kafka/blob/5d648cfd/build.gradle
--
diff --git a/build.gradle b/build.gradle
index c9ac433..1cbab29 100644
--- a/build.gradle
+++ b/build.gradle
@@ -137,7 +137,7 @@ subprojects {
   }
 }
 
-for ( sv in ['2_9_1', '2_9_2', '2_10_4', '2_11'] ) {
+for ( sv in ['2_9_1', '2_9_2', '2_10_4', '2_11_5'] ) {
  String svInDot = sv.replaceAll( "_", ".")
 
  tasks.create(name: "jar_core_${sv}", type: GradleBuild) {
@@ -177,20 +177,20 @@ for ( sv in ['2_9_1', '2_9_2', '2_10_4', '2_11'] ) {
   }
 }
 
-tasks.create(name: jarAll, dependsOn: ['jar_core_2_9_1', 'jar_core_2_9_2', 
'jar_core_2_10_4', 'jar_core_2_11', 'clients:jar', 'examples:jar', 
'contrib:hadoop-consumer:jar', 'contrib:hadoop-producer:jar']) {
+tasks.create(name: jarAll, dependsOn: ['jar_core_2_9_1', 'jar_core_2_9_2', 
'jar_core_2_10_4', 'jar_core_2_11_5', 'clients:jar', 'examples:jar', 
'contrib:hadoop-consumer:jar', 'contrib:hadoop-producer:jar']) {
 }
 
-tasks.create(name: srcJarAll, dependsOn: ['srcJar_2_9_1', 'srcJar_2_9_2', 
'srcJar_2_10_4', 'srcJar_2_11', 'clients:srcJar', 'examples:srcJar', 
'contrib:hadoop-consumer:srcJar', 'contrib:hadoop-producer:srcJar']) { }
+tasks.create(name: srcJarAll, dependsOn: ['srcJar_2_9_1', 'srcJar_2_9_2', 
'srcJar_2_10_4', 'srcJar_2_11_5', 'clients:srcJar', 'examples:srcJar', 
'contrib:hadoop-consumer:srcJar', 'contrib:hadoop-producer:srcJar']) { }
 
-tasks.create(name: docsJarAll, dependsOn: ['docsJar_2_9_1', 'docsJar_2_9_2', 
'docsJar_2_10_4', 'docsJar_2_11', 'clients:docsJar', 'examples:docsJar', 
'contrib:hadoop-consumer:docsJar', 'contrib:hadoop-producer:docsJar']) { }
+tasks.create(name: docsJarAll, dependsOn: ['docsJar_2_9_1', 'docsJar_2_9_2', 
'docsJar_2_10_4', 'docsJar_2_11_5', 'clients:docsJar', 'examples:docsJar', 
'contrib:hadoop-consumer:docsJar', 'contrib:hadoop-producer:docsJar']) { }
 
-tasks.create(name: testAll, dependsOn: ['test_core_2_9_1', 
'test_core_2_9_2', 'test_core_2_10_4', 'test_core_2_11', 'clients:test']) {
+tasks.create(name: testAll, dependsOn: ['test_core_2_9_1', 
'test_core_2_9_2', 'test_core_2_10_4', 'test_core_2_11_5', 'clients:test']) {
 }
 
-tasks.create(name: releaseTarGzAll, dependsOn: ['releaseTarGz_2_9_1', 
'releaseTarGz_2_9_2', 'releaseTarGz_2_10_4', 'releaseTarGz_2_11']) {
+tasks.create(name: releaseTarGzAll, dependsOn: ['releaseTarGz_2_9_1', 
'releaseTarGz_2_9_2', 'releaseTarGz_2_10_4', 'releaseTarGz_2_11_5']) {
 }
 
-tasks.create(name: uploadArchivesAll, dependsOn: 
['uploadCoreArchives_2_9_1', 'uploadCoreArchives_2_9_2', 
'uploadCoreArchives_2_10_4', 'uploadCoreArchives_2_11', 
'clients:uploadArchives', 'examples:uploadArchives', 
'contrib:hadoop-consumer:uploadArchives', 
'contrib:hadoop-producer:uploadArchives']) {
+tasks.create(name: uploadArchivesAll, dependsOn: 
['uploadCoreArchives_2_9_1', 'uploadCoreArchives_2_9_2', 
'uploadCoreArchives_2_10_4', 'uploadCoreArchives_2_11_5', 
'clients:uploadArchives', 'examples:uploadArchives', 
'contrib:hadoop-consumer:uploadArchives', 
'contrib:hadoop-producer:uploadArchives']) {
 }
 
 project(':core') {



kafka git commit: kafka-1868; ConsoleConsumer shouldn't override dual.commit.enabled to false if not explicitly set; patched by Jun Rao; reviewed by Joel Koshy

2015-01-15 Thread junrao
Repository: kafka
Updated Branches:
  refs/heads/trunk 45697ed6c -> 602d572f6


kafka-1868; ConsoleConsumer shouldn't override dual.commit.enabled to false if 
not explicitly set; patched by Jun Rao; reviewed by Joel Koshy


Project: http://git-wip-us.apache.org/repos/asf/kafka/repo
Commit: http://git-wip-us.apache.org/repos/asf/kafka/commit/602d572f
Tree: http://git-wip-us.apache.org/repos/asf/kafka/tree/602d572f
Diff: http://git-wip-us.apache.org/repos/asf/kafka/diff/602d572f

Branch: refs/heads/trunk
Commit: 602d572f6365f80f14e8c968390972ea9767e263
Parents: 45697ed
Author: Jun Rao jun...@gmail.com
Authored: Thu Jan 15 17:58:32 2015 -0800
Committer: Jun Rao jun...@gmail.com
Committed: Thu Jan 15 17:58:32 2015 -0800

--
 core/src/main/scala/kafka/tools/ConsoleConsumer.scala | 4 
 1 file changed, 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/kafka/blob/602d572f/core/src/main/scala/kafka/tools/ConsoleConsumer.scala
--
diff --git a/core/src/main/scala/kafka/tools/ConsoleConsumer.scala 
b/core/src/main/scala/kafka/tools/ConsoleConsumer.scala
index 323fc85..e455cb9 100644
--- a/core/src/main/scala/kafka/tools/ConsoleConsumer.scala
+++ b/core/src/main/scala/kafka/tools/ConsoleConsumer.scala
@@ -126,10 +126,6 @@ object ConsoleConsumer extends Logging {
 }
 consumerProps.put("auto.offset.reset", if(options.has(resetBeginningOpt)) "smallest" else "largest")
 consumerProps.put("zookeeper.connect", options.valueOf(zkConnectOpt))
-if(!consumerProps.containsKey("dual.commit.enabled"))
-  consumerProps.put("dual.commit.enabled","false")
-if(!consumerProps.containsKey("offsets.storage"))
-  consumerProps.put("offsets.storage","zookeeper")
 
 if (!options.has(deleteConsumerOffsetsOpt) && options.has(resetBeginningOpt) &&
 checkZkPathExists(options.valueOf(zkConnectOpt),"/consumers/" + consumerProps.getProperty("group.id")+ "/offsets")) {
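
For contrast, a hedged sketch of the fallback logic this patch removes (reconstructed from the diff above); after the removal, an unset key stays absent and the consumer's own default, driven by offsets.storage, applies:

    import java.util.Properties

    // The removed behavior, reconstructed: it masked the consumer's real defaults.
    def oldBehavior(props: Properties): Unit = {
      if (!props.containsKey("dual.commit.enabled"))
        props.put("dual.commit.enabled", "false")
      if (!props.containsKey("offsets.storage"))
        props.put("offsets.storage", "zookeeper")
    }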



svn commit: r1653422 - /kafka/site/082/configuration.html

2015-01-20 Thread junrao
Author: junrao
Date: Wed Jan 21 02:16:57 2015
New Revision: 1653422

URL: http://svn.apache.org/r1653422
Log:
update default config value for 0.8.2

Modified:
kafka/site/082/configuration.html

Modified: kafka/site/082/configuration.html
URL: 
http://svn.apache.org/viewvc/kafka/site/082/configuration.html?rev=1653422r1=1653421r2=1653422view=diff
==
--- kafka/site/082/configuration.html (original)
+++ kafka/site/082/configuration.html Wed Jan 21 02:16:57 2015
@@ -30,7 +30,7 @@ Topic-level configurations and defaults
 </tr>
 <tr>
   <td>port</td>
-  <td>6667</td>
+  <td>9092</td>
   <td>The port on which the server accepts client connections.</td>
 </tr>
 <tr>
@@ -57,7 +57,7 @@ ZooKeeper also allows you to add a chro
 </tr>
 <tr>
   <td>background.threads</td>
-  <td>4</td>
+  <td>10</td>
   <td>The number of threads to use for various background processing tasks such as file deletion. You should not need to change this.</td>
 </tr>
 <tr>
@@ -148,7 +148,7 @@ ZooKeeper also allows you to add a chro
 </tr>
 <tr>
   <td>log.cleaner.io.max.bytes.per.second</td>
-  <td>None</td>
+  <td>Double.MaxValue</td>
   <td>The maximum amount of I/O the log cleaner can do while performing log compaction. This setting allows setting a limit for the cleaner to avoid impacting live request serving.</td>
 </tr>
 <tr>
@@ -193,17 +193,17 @@ ZooKeeper also allows you to add a chro
 </tr>
 <tr>
   <td>log.flush.interval.messages</td>
-  <td>None</td>
+  <td>Long.MaxValue</td>
   <td>The number of messages written to a log partition before we force an fsync on the log. Setting this lower will sync data to disk more often but will have a major impact on performance. We generally recommend that people make use of replication for durability rather than depending on single-server fsync, however this setting can be used to be extra certain.</td>
 </tr>
 <tr>
   <td>log.flush.scheduler.interval.ms</td>
-  <td>3000</td>
+  <td>Long.MaxValue</td>
   <td>The frequency in ms that the log flusher checks whether any log is eligible to be flushed to disk.</td>
 </tr>
 <tr>
   <td>log.flush.interval.ms</td>
-  <td>None</td>
+  <td>Long.MaxValue</td>
   <td>The maximum time between fsync calls on the log. If used in conjuction with log.flush.interval.messages the log will be flushed when either criteria is met.</td>
 </tr>
 <tr>
@@ -219,7 +219,7 @@ ZooKeeper also allows you to add a chro
 <tr>
   <td>auto.create.topics.enable</td>
   <td>true</td>
-  <td>Enable auto creation of topic on the server.  If this is set to true then attempts to produce, consume, or fetch metadata for a non-existent topic will automatically create it with the default replication factor and number of partitions.</td>
+  <td>Enable auto creation of topic on the server.  If this is set to true then attempts to produce data or fetch metadata for a non-existent topic will automatically create it with the default replication factor and number of partitions.</td>
 </tr>
 <tr>
   <td>controller.socket.timeout.ms</td>
@@ -228,7 +228,7 @@ ZooKeeper also allows you to add a chro
 </tr>
 <tr>
   <td>controller.message.queue.size</td>
-  <td>10</td>
+  <td>Int.MaxValue</td>
   <td>The buffer size for controller-to-broker-channels</td>
 </tr>
 <tr>
@@ -285,12 +285,12 @@ ZooKeeper also allows you to add a chro
 </tr>
 <tr>
   <td>fetch.purgatory.purge.interval.requests</td>
-  <td>1</td>
+  <td>1000</td>
   <td>The purge interval (in number of requests) of the fetch request purgatory.</td>
 </tr>
 <tr>
   <td>producer.purgatory.purge.interval.requests</td>
-  <td>1</td>
+  <td>1000</td>
   <td>The purge interval (in number of requests) of the producer request purgatory.</td>
 </tr>
 <tr>
@@ -341,7 +341,7 @@ ZooKeeper also allows you to add a chro
 </tr>
 <tr>
   <td>offset.metadata.max.bytes</td>
-  <td>1024</td>
+  <td>4096</td>
   <td>The maximum amount of metadata to allow clients to save with their offsets.</td>
 </tr>
 <tr>
@@ -557,7 +557,7 @@ The essential consumer configurations ar
 </tr>
 <tr>
   <td>queued.max.message.chunks</td>
-  <td colspan="1">10</td>
+  <td colspan="1">2</td>
   <td>Max number of message chunks buffered for consumption. Each chunk can be up to fetch.message.max.bytes.</td>
 </tr>
 <tr>
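
To make the new maximum-value defaults concrete: in effect they are sentinels meaning "disabled". A hedged Scala sketch for the message-count flush trigger (a simplified check, not the broker's actual code):

    // With the default of Long.MaxValue, a count-based flush check never fires;
    // durability is left to replication and the OS flush policy.
    val logFlushIntervalMessages: Long = Long.MaxValue
    def shouldFlush(unflushedMessages: Long): Boolean =
      unflushedMessages >= logFlushIntervalMessages
    assert(!shouldFlush(1000000L))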




kafka git commit: kafka-1868; ConsoleConsumer shouldn't override dual.commit.enabled to false if not explicitly set; patched by Jun Rao; reviewed by Joel Koshy

2015-01-15 Thread junrao
Repository: kafka
Updated Branches:
  refs/heads/0.8.2 a643be62a -> 17c8bdcbb


kafka-1868; ConsoleConsumer shouldn't override dual.commit.enabled to false if 
not explicitly set; patched by Jun Rao; reviewed by Joel Koshy


Project: http://git-wip-us.apache.org/repos/asf/kafka/repo
Commit: http://git-wip-us.apache.org/repos/asf/kafka/commit/17c8bdcb
Tree: http://git-wip-us.apache.org/repos/asf/kafka/tree/17c8bdcb
Diff: http://git-wip-us.apache.org/repos/asf/kafka/diff/17c8bdcb

Branch: refs/heads/0.8.2
Commit: 17c8bdcbb42d81db47d03091049588238df7e3c6
Parents: a643be6
Author: Jun Rao jun...@gmail.com
Authored: Thu Jan 15 17:55:48 2015 -0800
Committer: Jun Rao jun...@gmail.com
Committed: Thu Jan 15 17:55:48 2015 -0800

--
 core/src/main/scala/kafka/tools/ConsoleConsumer.scala | 4 
 1 file changed, 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/kafka/blob/17c8bdcb/core/src/main/scala/kafka/tools/ConsoleConsumer.scala
--
diff --git a/core/src/main/scala/kafka/tools/ConsoleConsumer.scala 
b/core/src/main/scala/kafka/tools/ConsoleConsumer.scala
index 323fc85..e455cb9 100644
--- a/core/src/main/scala/kafka/tools/ConsoleConsumer.scala
+++ b/core/src/main/scala/kafka/tools/ConsoleConsumer.scala
@@ -126,10 +126,6 @@ object ConsoleConsumer extends Logging {
 }
 consumerProps.put("auto.offset.reset", if(options.has(resetBeginningOpt)) "smallest" else "largest")
 consumerProps.put("zookeeper.connect", options.valueOf(zkConnectOpt))
-if(!consumerProps.containsKey("dual.commit.enabled"))
-  consumerProps.put("dual.commit.enabled","false")
-if(!consumerProps.containsKey("offsets.storage"))
-  consumerProps.put("offsets.storage","zookeeper")
 
 if (!options.has(deleteConsumerOffsetsOpt) && options.has(resetBeginningOpt) &&
 checkZkPathExists(options.valueOf(zkConnectOpt),"/consumers/" + consumerProps.getProperty("group.id")+ "/offsets")) {



kafka git commit: kafka-1952; High CPU Usage in 0.8.2 release; patched by Jun Rao; reviewed by Guozhang Wang, Ewen Cheslack-Postava and Neha Narkhede

2015-02-18 Thread junrao
Repository: kafka
Updated Branches:
  refs/heads/trunk d5fbba633 -> f5684366e


kafka-1952; High CPU Usage in 0.8.2 release; patched by Jun Rao; reviewed by 
Guozhang Wang, Ewen Cheslack-Postava and Neha Narkhede


Project: http://git-wip-us.apache.org/repos/asf/kafka/repo
Commit: http://git-wip-us.apache.org/repos/asf/kafka/commit/f5684366
Tree: http://git-wip-us.apache.org/repos/asf/kafka/tree/f5684366
Diff: http://git-wip-us.apache.org/repos/asf/kafka/diff/f5684366

Branch: refs/heads/trunk
Commit: f5684366ef60125c4d799121a6c0adca4744e8ab
Parents: d5fbba6
Author: Jun Rao jun...@gmail.com
Authored: Wed Feb 18 13:39:05 2015 -0800
Committer: Jun Rao jun...@gmail.com
Committed: Wed Feb 18 13:39:05 2015 -0800

--
 .../scala/kafka/server/DelayedOperation.scala   | 34 +---
 1 file changed, 23 insertions(+), 11 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/kafka/blob/f5684366/core/src/main/scala/kafka/server/DelayedOperation.scala
--
diff --git a/core/src/main/scala/kafka/server/DelayedOperation.scala 
b/core/src/main/scala/kafka/server/DelayedOperation.scala
index fc06b01..1d11099 100644
--- a/core/src/main/scala/kafka/server/DelayedOperation.scala
+++ b/core/src/main/scala/kafka/server/DelayedOperation.scala
@@ -128,25 +128,37 @@ class DelayedOperationPurgatory[T <: DelayedOperation](brokerId: Int = 0, purgeI
* @return true iff the delayed operations can be completed by the caller
*/
   def tryCompleteElseWatch(operation: T, watchKeys: Seq[Any]): Boolean = {
+assert(watchKeys.size > 0, "The watch key list can't be empty")
+
+// The cost of tryComplete() is typically proportional to the number of 
keys. Calling
+// tryComplete() for each key is going to be expensive if there are many 
keys. Instead,
+// we do the check in the following way. Call tryComplete(). If the 
operation is not completed,
+// we just add the operation to all keys. Then we call tryComplete() 
again. At this time, if
+// the operation is still not completed, we are guaranteed that it won't 
miss any future triggering
+// event since the operation is already on the watcher list for all keys. 
This does mean that
+// if the operation is completed (by another thread) between the two 
tryComplete() calls, the
+// operation is unnecessarily added for watch. However, this is a less 
severe issue since the
+// expire reaper will clean it up periodically.
+
+var isCompletedByMe = operation synchronized operation.tryComplete()
+if (isCompletedByMe)
+  return true
+
 for(key <- watchKeys) {
-  // if the operation is already completed, stopping adding it to
-  // any further lists and return false
+  // If the operation is already completed, stop adding it to the rest of 
the watcher list.
   if (operation.isCompleted())
 return false
   val watchers = watchersFor(key)
-  // if the operation can by completed by myself, stop adding it to
-  // any further lists and return true immediately
-  if(operation synchronized operation.tryComplete()) {
-return true
-  } else {
-watchers.watch(operation)
-  }
+  watchers.watch(operation)
 }
 
+isCompletedByMe = operation synchronized operation.tryComplete()
+if (isCompletedByMe)
+  return true
+
 // if it cannot be completed by now and hence is watched, add to the 
expire queue also
-if (! operation.isCompleted()) {
+if (! operation.isCompleted())
   expirationReaper.enqueue(operation)
-}
 
 false
   }
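
For intuition, a minimal hedged sketch of the check-watch-recheck pattern the comment above describes (simplified trait, not the actual purgatory types):

    trait Op { def tryComplete(): Boolean; def isCompleted(): Boolean }

    // Try once cheaply; if not complete, register on every watch list, then try
    // again. After registration, no future triggering event can be missed.
    def tryCompleteElseWatch(op: Op, register: Op => Unit): Boolean = {
      if (op.synchronized(op.tryComplete())) return true
      register(op)
      if (op.synchronized(op.tryComplete())) return true
      false
    }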



kafka git commit: KAFKA-1902; fix MetricName so that Yammer reporter can work correctly; patched by Jun Rao; reviewed by Manikumar Reddy and Joel Koshy

2015-01-28 Thread junrao
Repository: kafka
Updated Branches:
  refs/heads/0.8.2 0b312a6b9 -> 2a1e3d451


KAFKA-1902; fix MetricName so that Yammer reporter can work correctly; patched 
by Jun Rao; reviewed by Manikumar Reddy and Joel Koshy


Project: http://git-wip-us.apache.org/repos/asf/kafka/repo
Commit: http://git-wip-us.apache.org/repos/asf/kafka/commit/2a1e3d45
Tree: http://git-wip-us.apache.org/repos/asf/kafka/tree/2a1e3d45
Diff: http://git-wip-us.apache.org/repos/asf/kafka/diff/2a1e3d45

Branch: refs/heads/0.8.2
Commit: 2a1e3d4510e8fadb0cad0cb7290baf54aae39c23
Parents: 0b312a6
Author: Jun Rao jun...@gmail.com
Authored: Wed Jan 28 18:44:16 2015 -0600
Committer: Jun Rao jun...@gmail.com
Committed: Wed Jan 28 18:44:16 2015 -0600

--
 .../scala/kafka/metrics/KafkaMetricsGroup.scala | 27 ++--
 1 file changed, 25 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/kafka/blob/2a1e3d45/core/src/main/scala/kafka/metrics/KafkaMetricsGroup.scala
--
diff --git a/core/src/main/scala/kafka/metrics/KafkaMetricsGroup.scala 
b/core/src/main/scala/kafka/metrics/KafkaMetricsGroup.scala
index e9e4918..9e31184 100644
--- a/core/src/main/scala/kafka/metrics/KafkaMetricsGroup.scala
+++ b/core/src/main/scala/kafka/metrics/KafkaMetricsGroup.scala
@@ -61,9 +61,15 @@ trait KafkaMetricsGroup extends Logging {
   nameBuilder.append(name)
 }
 
-KafkaMetricsGroup.toMBeanName(tags).map(mbeanName => nameBuilder.append(",").append(mbeanName))
+val scope: String = KafkaMetricsGroup.toScope(tags).getOrElse(null)
+val tagsName = KafkaMetricsGroup.toMBeanName(tags)
+tagsName match {
+  case Some(tn) =>
+nameBuilder.append(",").append(tn)
+  case None =>
+}
 
-new MetricName(group, typeName, name, null, nameBuilder.toString())
+new MetricName(group, typeName, name, scope, nameBuilder.toString())
   }
 
   def newGauge[T](name: String, metric: Gauge[T], tags: 
scala.collection.Map[String, String] = Map.empty) =
@@ -160,6 +166,23 @@ object KafkaMetricsGroup extends KafkaMetricsGroup with 
Logging {
 }
   }
 
+  private def toScope(tags: collection.Map[String, String]): Option[String] = {
+val filteredTags = tags
+  .filter { case (tagKey, tagValue) => tagValue != "" }
+if (filteredTags.nonEmpty) {
+  // convert dot to _ since reporters like Graphite typically use dot to 
represent hierarchy
+  val tagsString = filteredTags
+.toList.sortWith((t1, t2) => t1._1 < t2._1)
+.map { case (key, value) => "%s.%s".format(key, value.replaceAll("\\.", "_"))}
+.mkString(".")
+
+  Some(tagsString)
+}
+else {
+  None
+}
+  }
+
   def removeAllConsumerMetrics(clientId: String) {
 
FetchRequestAndResponseStatsRegistry.removeConsumerFetchRequestAndResponseStats(clientId)
 ConsumerTopicStatsRegistry.removeConsumerTopicStat(clientId)
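
For illustration, a standalone sketch of the tag-to-scope conversion added above: tags are sorted by key and dots in values become underscores, so dot-hierarchical reporters such as Graphite keep a sane tree:

    val tags = Map("clientId" -> "console.consumer", "topic" -> "t1")
    val scope = tags
      .filter { case (_, v) => v != "" }
      .toList.sortWith((t1, t2) => t1._1 < t2._1)
      .map { case (k, v) => "%s.%s".format(k, v.replaceAll("\\.", "_")) }
      .mkString(".")
    assert(scope == "clientId.console_consumer.topic.t1")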



kafka git commit: KAFKA-1729; Add constructor to javaapi to allow constructing explicitly versioned offset commit requests; patched by Joel Koshy; reviewed by Jun Rao

2015-01-28 Thread junrao
Repository: kafka
Updated Branches:
  refs/heads/0.8.2 2a1e3d451 -> 96ce96dc9


KAFKA-1729; Add constructor to javaapi to allow constructing explicitly 
versioned offset commit requests; patched by Joel Koshy; reviewed by Jun Rao


Project: http://git-wip-us.apache.org/repos/asf/kafka/repo
Commit: http://git-wip-us.apache.org/repos/asf/kafka/commit/96ce96dc
Tree: http://git-wip-us.apache.org/repos/asf/kafka/tree/96ce96dc
Diff: http://git-wip-us.apache.org/repos/asf/kafka/diff/96ce96dc

Branch: refs/heads/0.8.2
Commit: 96ce96dc959dd827ae1f70a17e89e05b91b6ba58
Parents: 2a1e3d4
Author: Joel Koshy jjko...@gmail.com
Authored: Wed Jan 28 19:16:43 2015 -0600
Committer: Jun Rao jun...@gmail.com
Committed: Wed Jan 28 19:16:43 2015 -0600

--
 .../main/scala/kafka/api/OffsetCommitResponse.scala   |  4 +++-
 .../kafka/javaapi/ConsumerMetadataResponse.scala  |  6 ++
 .../scala/kafka/javaapi/OffsetCommitRequest.scala | 14 --
 .../scala/kafka/javaapi/OffsetCommitResponse.scala|  9 +
 .../scala/kafka/javaapi/OffsetFetchResponse.scala |  5 +
 5 files changed, 35 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/kafka/blob/96ce96dc/core/src/main/scala/kafka/api/OffsetCommitResponse.scala
--
diff --git a/core/src/main/scala/kafka/api/OffsetCommitResponse.scala 
b/core/src/main/scala/kafka/api/OffsetCommitResponse.scala
index 03dd736..abe67a5 100644
--- a/core/src/main/scala/kafka/api/OffsetCommitResponse.scala
+++ b/core/src/main/scala/kafka/api/OffsetCommitResponse.scala
@@ -20,7 +20,7 @@ package kafka.api
 import java.nio.ByteBuffer
 
 import kafka.utils.Logging
-import kafka.common.TopicAndPartition
+import kafka.common.{ErrorMapping, TopicAndPartition}
 
 object OffsetCommitResponse extends Logging {
   val CurrentVersion: Short = 1
@@ -50,6 +50,8 @@ case class OffsetCommitResponse(commitStatus: 
Map[TopicAndPartition, Short],
 
   lazy val commitStatusGroupedByTopic = commitStatus.groupBy(_._1.topic)
 
+  def hasError = commitStatus.exists{ case (topicAndPartition, errorCode) => errorCode != ErrorMapping.NoError }
+
   def writeTo(buffer: ByteBuffer) {
 buffer.putInt(correlationId)
 buffer.putInt(commitStatusGroupedByTopic.size)

http://git-wip-us.apache.org/repos/asf/kafka/blob/96ce96dc/core/src/main/scala/kafka/javaapi/ConsumerMetadataResponse.scala
--
diff --git a/core/src/main/scala/kafka/javaapi/ConsumerMetadataResponse.scala 
b/core/src/main/scala/kafka/javaapi/ConsumerMetadataResponse.scala
index 1b28861..d281bb3 100644
--- a/core/src/main/scala/kafka/javaapi/ConsumerMetadataResponse.scala
+++ b/core/src/main/scala/kafka/javaapi/ConsumerMetadataResponse.scala
@@ -17,6 +17,8 @@
 
 package kafka.javaapi
 
+import java.nio.ByteBuffer
+
 import kafka.cluster.Broker
 
 class ConsumerMetadataResponse(private val underlying: 
kafka.api.ConsumerMetadataResponse) {
@@ -40,3 +42,7 @@ class ConsumerMetadataResponse(private val underlying: 
kafka.api.ConsumerMetadat
   override def toString = underlying.toString
 
 }
+
+object ConsumerMetadataResponse {
+  def readFrom(buffer: ByteBuffer) = new 
ConsumerMetadataResponse(kafka.api.ConsumerMetadataResponse.readFrom(buffer))
+}

http://git-wip-us.apache.org/repos/asf/kafka/blob/96ce96dc/core/src/main/scala/kafka/javaapi/OffsetCommitRequest.scala
--
diff --git a/core/src/main/scala/kafka/javaapi/OffsetCommitRequest.scala 
b/core/src/main/scala/kafka/javaapi/OffsetCommitRequest.scala
index 873f575..456c3c4 100644
--- a/core/src/main/scala/kafka/javaapi/OffsetCommitRequest.scala
+++ b/core/src/main/scala/kafka/javaapi/OffsetCommitRequest.scala
@@ -22,7 +22,8 @@ import kafka.common.{OffsetAndMetadata, TopicAndPartition}
 class OffsetCommitRequest(groupId: String,
   requestInfo: java.util.Map[TopicAndPartition, 
OffsetAndMetadata],
   correlationId: Int,
-  clientId: String) {
+  clientId: String,
+  versionId: Short) {
   val underlying = {
 val scalaMap: collection.immutable.Map[TopicAndPartition, 
OffsetAndMetadata] = {
   import collection.JavaConversions._
@@ -32,12 +33,21 @@ class OffsetCommitRequest(groupId: String,
 kafka.api.OffsetCommitRequest(
   groupId = groupId,
   requestInfo = scalaMap,
-  versionId = 0, // binds to version 0 so that it commits to Zookeeper
+  versionId = versionId,
   correlationId = correlationId,
   clientId = clientId
 )
   }
 
+  def this(groupId: String,
+   requestInfo: java.util.Map[TopicAndPartition, OffsetAndMetadata],
+   correlationId: Int,
+   
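
A hedged usage sketch of the explicitly versioned javaapi constructor (the auxiliary constructor is truncated above; the parameter order follows the primary constructor shown, and the versionId semantics come from the removed comment: 0 commits to ZooKeeper, 1 commits to Kafka):

    import kafka.common.{OffsetAndMetadata, TopicAndPartition}

    val requestInfo = new java.util.HashMap[TopicAndPartition, OffsetAndMetadata]()
    requestInfo.put(TopicAndPartition("my-topic", 0), OffsetAndMetadata(offset = 42L))
    val commitRequest = new kafka.javaapi.OffsetCommitRequest(
      "my-group",     // groupId
      requestInfo,
      0,              // correlationId
      "demo-client",  // clientId
      1.toShort)      // versionId: 0 = ZooKeeper-backed, 1 = Kafka-backed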

kafka git commit: KAFKA-1861; Publishing kafka-client:test in order to utilize the helper utils in TestUtils; patched by Manikumar Reddy; reviewed by Jun Rao

2015-01-28 Thread junrao
Repository: kafka
Updated Branches:
  refs/heads/0.8.2 96ce96dc9 -> 7130da90a


KAFKA-1861; Publishing kafka-client:test in order to utilize the helper utils 
in TestUtils; patched by Manikumar Reddy; reviewed by Jun Rao


Project: http://git-wip-us.apache.org/repos/asf/kafka/repo
Commit: http://git-wip-us.apache.org/repos/asf/kafka/commit/7130da90
Tree: http://git-wip-us.apache.org/repos/asf/kafka/tree/7130da90
Diff: http://git-wip-us.apache.org/repos/asf/kafka/diff/7130da90

Branch: refs/heads/0.8.2
Commit: 7130da90a9ee9e6fb4beb2a2a6ab05c06c9bfac4
Parents: 96ce96d
Author: Manikumar Reddy manikumar.re...@gmail.com
Authored: Wed Jan 28 19:39:56 2015 -0600
Committer: Jun Rao jun...@gmail.com
Committed: Wed Jan 28 19:39:56 2015 -0600

--
 build.gradle | 3 +++
 1 file changed, 3 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/kafka/blob/7130da90/build.gradle
--
diff --git a/build.gradle b/build.gradle
index 1cbab29..7982fe7 100644
--- a/build.gradle
+++ b/build.gradle
@@ -375,4 +375,7 @@ project(':clients') {
 include "**/org/apache/kafka/common/serialization/*"
   }
 
+  artifacts {
+archives testJar
+  }
 }
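
As a hedged illustration of consuming the newly published artifact (the sbt coordinates and classifier here are assumptions; the commit itself only adds the archive), a build.sbt line might look like:

    // Pull in the published clients test jar for its TestUtils-style helpers.
    libraryDependencies +=
      "org.apache.kafka" % "kafka-clients" % "0.8.2.0" % "test" classifier "test"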



Git Push Summary

2015-01-28 Thread junrao
Repository: kafka
Updated Tags:  refs/tags/0.8.2.0 [created] 223ac42a7


kafka git commit: kafka-1797; (addressing Manikumar Reddy's comment) add the serializer/deserializer api to the new java client; patched by Jun Rao; reviewed by Manikumar Reddy and Neha Narkhede

2015-01-09 Thread junrao
Repository: kafka
Updated Branches:
  refs/heads/trunk f82518a85 -> a93ef199b


kafka-1797; (addressing Manikumar Reddy's comment) add the 
serializer/deserializer api to the new java client; patched by Jun Rao; 
reviewed by Manikumar Reddy and Neha Narkhede


Project: http://git-wip-us.apache.org/repos/asf/kafka/repo
Commit: http://git-wip-us.apache.org/repos/asf/kafka/commit/a93ef199
Tree: http://git-wip-us.apache.org/repos/asf/kafka/tree/a93ef199
Diff: http://git-wip-us.apache.org/repos/asf/kafka/diff/a93ef199

Branch: refs/heads/trunk
Commit: a93ef199b2375c422e35d82ac7aa3a2fdacc1e74
Parents: f82518a
Author: Jun Rao jun...@gmail.com
Authored: Fri Jan 9 11:27:00 2015 -0800
Committer: Jun Rao jun...@gmail.com
Committed: Fri Jan 9 11:27:00 2015 -0800

--
 .../apache/kafka/common/serialization/StringDeserializer.java   | 5 -
 .../org/apache/kafka/common/serialization/StringSerializer.java | 5 -
 .../apache/kafka/common/serialization/SerializationTest.java| 4 +++-
 3 files changed, 11 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/kafka/blob/a93ef199/clients/src/main/java/org/apache/kafka/common/serialization/StringDeserializer.java
--
diff --git 
a/clients/src/main/java/org/apache/kafka/common/serialization/StringDeserializer.java
 
b/clients/src/main/java/org/apache/kafka/common/serialization/StringDeserializer.java
index a3b3700..9783ea0 100644
--- 
a/clients/src/main/java/org/apache/kafka/common/serialization/StringDeserializer.java
+++ 
b/clients/src/main/java/org/apache/kafka/common/serialization/StringDeserializer.java
@@ -37,7 +37,10 @@ public class StringDeserializer implements Deserializer<String> {
 @Override
 public String deserialize(String topic, byte[] data) {
 try {
-return new String(data, encoding);
+if (data == null)
+return null;
+else
+return new String(data, encoding);
 } catch (UnsupportedEncodingException e) {
 throw new SerializationException("Error when deserializing byte[] to string due to unsupported encoding " + encoding);
 }

http://git-wip-us.apache.org/repos/asf/kafka/blob/a93ef199/clients/src/main/java/org/apache/kafka/common/serialization/StringSerializer.java
--
diff --git 
a/clients/src/main/java/org/apache/kafka/common/serialization/StringSerializer.java
 
b/clients/src/main/java/org/apache/kafka/common/serialization/StringSerializer.java
index 02db47f..636d905 100644
--- 
a/clients/src/main/java/org/apache/kafka/common/serialization/StringSerializer.java
+++ 
b/clients/src/main/java/org/apache/kafka/common/serialization/StringSerializer.java
@@ -37,7 +37,10 @@ public class StringSerializer implements Serializer<String> {
 @Override
 public byte[] serialize(String topic, String data) {
 try {
-return data.getBytes(encoding);
+if (data == null)
+return null;
+else
+return data.getBytes(encoding);
 } catch (UnsupportedEncodingException e) {
 throw new SerializationException("Error when serializing string to byte[] due to unsupported encoding " + encoding);
 }

http://git-wip-us.apache.org/repos/asf/kafka/blob/a93ef199/clients/src/test/java/org/apache/kafka/common/serialization/SerializationTest.java
--
diff --git 
a/clients/src/test/java/org/apache/kafka/common/serialization/SerializationTest.java
 
b/clients/src/test/java/org/apache/kafka/common/serialization/SerializationTest.java
index d550a31..b6e1497 100644
--- 
a/clients/src/test/java/org/apache/kafka/common/serialization/SerializationTest.java
+++ 
b/clients/src/test/java/org/apache/kafka/common/serialization/SerializationTest.java
@@ -48,8 +48,10 @@ public class SerializationTest {
 
 assertEquals("Should get the original string after serialization and deserialization with encoding " + encoding,
 str, deserializer.deserialize("mytopic", serializer.serialize("mytopic", str)));
-}
 
+assertEquals("Should support null in serialization and deserialization with encoding " + encoding,
+null, deserializer.deserialize("mytopic", serializer.serialize("mytopic", null)));
+}
 }
 
 private SerDeser<String> getStringSerDeser(String encoder) {
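
A hedged round-trip sketch of the null handling this patch adds (string serde classes from org.apache.kafka.common.serialization; the default UTF-8 encoding is assumed, i.e. configure() is skipped):

    import org.apache.kafka.common.serialization.{StringDeserializer, StringSerializer}

    val serializer = new StringSerializer()
    val deserializer = new StringDeserializer()
    // null now passes through both directions instead of raising an exception
    assert(serializer.serialize("mytopic", null) == null)
    assert(deserializer.deserialize("mytopic", null) == null)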



kafka git commit: kafka-1851; OffsetFetchRequest returns extra partitions when input only contains unknown partitions; patched by Jun Rao; reviewed by Neha Narkhede

2015-01-09 Thread junrao
Repository: kafka
Updated Branches:
  refs/heads/trunk a93ef199b -> e52a6181b


kafka-1851; OffsetFetchRequest returns extra partitions when input only 
contains unknown partitions; patched by Jun Rao; reviewed by Neha Narkhede


Project: http://git-wip-us.apache.org/repos/asf/kafka/repo
Commit: http://git-wip-us.apache.org/repos/asf/kafka/commit/e52a6181
Tree: http://git-wip-us.apache.org/repos/asf/kafka/tree/e52a6181
Diff: http://git-wip-us.apache.org/repos/asf/kafka/diff/e52a6181

Branch: refs/heads/trunk
Commit: e52a6181bf0969f315ac0f0d325eac34d2b4a6ee
Parents: a93ef19
Author: Jun Rao jun...@gmail.com
Authored: Fri Jan 9 11:33:48 2015 -0800
Committer: Jun Rao jun...@gmail.com
Committed: Fri Jan 9 11:33:48 2015 -0800

--
 core/src/main/scala/kafka/server/KafkaApis.scala| 6 +-
 .../src/test/scala/unit/kafka/server/OffsetCommitTest.scala | 9 -
 2 files changed, 13 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/kafka/blob/e52a6181/core/src/main/scala/kafka/server/KafkaApis.scala
--
diff --git a/core/src/main/scala/kafka/server/KafkaApis.scala 
b/core/src/main/scala/kafka/server/KafkaApis.scala
index 2a1c032..c011a1b 100644
--- a/core/src/main/scala/kafka/server/KafkaApis.scala
+++ b/core/src/main/scala/kafka/server/KafkaApis.scala
@@ -396,7 +396,11 @@ class KafkaApis(val requestChannel: RequestChannel,
   metadataCache.getPartitionInfo(topicAndPartition.topic, 
topicAndPartition.partition).isEmpty
 )
 val unknownStatus = unknownTopicPartitions.map(topicAndPartition => (topicAndPartition, OffsetMetadataAndError.UnknownTopicOrPartition)).toMap
-val knownStatus = offsetManager.getOffsets(offsetFetchRequest.groupId, 
knownTopicPartitions).toMap
+val knownStatus =
+  if (knownTopicPartitions.size > 0)
+offsetManager.getOffsets(offsetFetchRequest.groupId, 
knownTopicPartitions).toMap
+  else
+Map.empty[TopicAndPartition, OffsetMetadataAndError]
 val status = unknownStatus ++ knownStatus
 
 val response = OffsetFetchResponse(status, 
offsetFetchRequest.correlationId)

http://git-wip-us.apache.org/repos/asf/kafka/blob/e52a6181/core/src/test/scala/unit/kafka/server/OffsetCommitTest.scala
--
diff --git a/core/src/test/scala/unit/kafka/server/OffsetCommitTest.scala 
b/core/src/test/scala/unit/kafka/server/OffsetCommitTest.scala
index 8c5364f..4a3a5b2 100644
--- a/core/src/test/scala/unit/kafka/server/OffsetCommitTest.scala
+++ b/core/src/test/scala/unit/kafka/server/OffsetCommitTest.scala
@@ -79,7 +79,7 @@ class OffsetCommitTest extends JUnit3Suite with 
ZooKeeperTestHarness {
 // create the topic
 createTopic(zkClient, topic, partitionReplicaAssignment = 
expectedReplicaAssignment, servers = Seq(server))
 
-val commitRequest = OffsetCommitRequest("test-group", immutable.Map(topicAndPartition -> OffsetAndMetadata(offset=42L)))
+val commitRequest = OffsetCommitRequest(group, immutable.Map(topicAndPartition -> OffsetAndMetadata(offset=42L)))
 val commitResponse = simpleConsumer.commitOffsets(commitRequest)
 
 assertEquals(ErrorMapping.NoError, 
commitResponse.commitStatus.get(topicAndPartition).get)
@@ -109,6 +109,13 @@ class OffsetCommitTest extends JUnit3Suite with 
ZooKeeperTestHarness {
 assertEquals("some metadata", fetchResponse1.requestInfo.get(topicAndPartition).get.metadata)
 assertEquals(100L, 
fetchResponse1.requestInfo.get(topicAndPartition).get.offset)
 
+// Fetch an unknown topic and verify
+val unknownTopicAndPartition = TopicAndPartition("unknownTopic", 0)
+val fetchRequest2 = OffsetFetchRequest(group, 
Seq(unknownTopicAndPartition))
+val fetchResponse2 = simpleConsumer.fetchOffsets(fetchRequest2)
+
+assertEquals(OffsetMetadataAndError.UnknownTopicOrPartition, 
fetchResponse2.requestInfo.get(unknownTopicAndPartition).get)
+assertEquals(1, fetchResponse2.requestInfo.size)
   }
 
   @Test
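
The essence of the fix, as a hedged standalone sketch (simplified to strings; the real code works with TopicAndPartition and OffsetMetadataAndError): the offset-manager lookup is skipped entirely when no requested partition is known, so no extra partitions leak into the response:

    def fetchStatus(known: Seq[String], unknown: Seq[String],
                    lookup: Seq[String] => Map[String, Long]): Map[String, Long] = {
      val unknownStatus = unknown.map(p => (p, -1L)).toMap  // -1L stands in for "unknown"
      val knownStatus =
        if (known.size > 0) lookup(known)
        else Map.empty[String, Long]  // guard added by the patch
      unknownStatus ++ knownStatus
    }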



kafka git commit: kafka-1851; OffsetFetchRequest returns extra partitions when input only contains unknown partitions; patched by Jun Rao; reviewed by Neha Narkhede

2015-01-09 Thread junrao
Repository: kafka
Updated Branches:
  refs/heads/0.8.2 f71933ef1 -> f88db16d1


kafka-1851; OffsetFetchRequest returns extra partitions when input only 
contains unknown partitions; patched by Jun Rao; reviewed by Neha Narkhede


Project: http://git-wip-us.apache.org/repos/asf/kafka/repo
Commit: http://git-wip-us.apache.org/repos/asf/kafka/commit/f88db16d
Tree: http://git-wip-us.apache.org/repos/asf/kafka/tree/f88db16d
Diff: http://git-wip-us.apache.org/repos/asf/kafka/diff/f88db16d

Branch: refs/heads/0.8.2
Commit: f88db16d15ff8a1883b7aed3c60eefa64faa764c
Parents: f71933e
Author: Jun Rao jun...@gmail.com
Authored: Fri Jan 9 11:31:47 2015 -0800
Committer: Jun Rao jun...@gmail.com
Committed: Fri Jan 9 11:31:47 2015 -0800

--
 core/src/main/scala/kafka/server/KafkaApis.scala| 6 +-
 .../src/test/scala/unit/kafka/server/OffsetCommitTest.scala | 9 -
 2 files changed, 13 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/kafka/blob/f88db16d/core/src/main/scala/kafka/server/KafkaApis.scala
--
diff --git a/core/src/main/scala/kafka/server/KafkaApis.scala 
b/core/src/main/scala/kafka/server/KafkaApis.scala
index 2f00992..9a61fcb 100644
--- a/core/src/main/scala/kafka/server/KafkaApis.scala
+++ b/core/src/main/scala/kafka/server/KafkaApis.scala
@@ -508,7 +508,11 @@ class KafkaApis(val requestChannel: RequestChannel,
   metadataCache.getPartitionInfo(topicAndPartition.topic, 
topicAndPartition.partition).isEmpty
 )
 val unknownStatus = unknownTopicPartitions.map(topicAndPartition => (topicAndPartition, OffsetMetadataAndError.UnknownTopicOrPartition)).toMap
-val knownStatus = offsetManager.getOffsets(offsetFetchRequest.groupId, 
knownTopicPartitions).toMap
+val knownStatus =
+  if (knownTopicPartitions.size > 0)
+offsetManager.getOffsets(offsetFetchRequest.groupId, 
knownTopicPartitions).toMap
+  else
+Map.empty[TopicAndPartition, OffsetMetadataAndError]
 val status = unknownStatus ++ knownStatus
 
 val response = OffsetFetchResponse(status, 
offsetFetchRequest.correlationId)

http://git-wip-us.apache.org/repos/asf/kafka/blob/f88db16d/core/src/test/scala/unit/kafka/server/OffsetCommitTest.scala
--
diff --git a/core/src/test/scala/unit/kafka/server/OffsetCommitTest.scala 
b/core/src/test/scala/unit/kafka/server/OffsetCommitTest.scala
index 8c5364f..4a3a5b2 100644
--- a/core/src/test/scala/unit/kafka/server/OffsetCommitTest.scala
+++ b/core/src/test/scala/unit/kafka/server/OffsetCommitTest.scala
@@ -79,7 +79,7 @@ class OffsetCommitTest extends JUnit3Suite with 
ZooKeeperTestHarness {
 // create the topic
 createTopic(zkClient, topic, partitionReplicaAssignment = 
expectedReplicaAssignment, servers = Seq(server))
 
-val commitRequest = OffsetCommitRequest("test-group", immutable.Map(topicAndPartition -> OffsetAndMetadata(offset=42L)))
+val commitRequest = OffsetCommitRequest(group, immutable.Map(topicAndPartition -> OffsetAndMetadata(offset=42L)))
 val commitResponse = simpleConsumer.commitOffsets(commitRequest)
 
 assertEquals(ErrorMapping.NoError, 
commitResponse.commitStatus.get(topicAndPartition).get)
@@ -109,6 +109,13 @@ class OffsetCommitTest extends JUnit3Suite with 
ZooKeeperTestHarness {
 assertEquals("some metadata", fetchResponse1.requestInfo.get(topicAndPartition).get.metadata)
 assertEquals(100L, 
fetchResponse1.requestInfo.get(topicAndPartition).get.offset)
 
+// Fetch an unknown topic and verify
+val unknownTopicAndPartition = TopicAndPartition("unknownTopic", 0)
+val fetchRequest2 = OffsetFetchRequest(group, 
Seq(unknownTopicAndPartition))
+val fetchResponse2 = simpleConsumer.fetchOffsets(fetchRequest2)
+
+assertEquals(OffsetMetadataAndError.UnknownTopicOrPartition, 
fetchResponse2.requestInfo.get(unknownTopicAndPartition).get)
+assertEquals(1, fetchResponse2.requestInfo.size)
   }
 
   @Test



svn commit: r1650694 - /kafka/site/082/ops.html

2015-01-09 Thread junrao
Author: junrao
Date: Fri Jan  9 23:21:15 2015
New Revision: 1650694

URL: http://svn.apache.org/r1650694
Log:
update jmx metrics name for 0.8.2

Modified:
kafka/site/082/ops.html

Modified: kafka/site/082/ops.html
URL: 
http://svn.apache.org/viewvc/kafka/site/082/ops.html?rev=1650694r1=1650693r2=1650694view=diff
==
--- kafka/site/082/ops.html (original)
+++ kafka/site/082/ops.html Fri Jan  9 23:21:15 2015
@@ -458,62 +458,62 @@ We pay particular we do graphing and ale
 </tr>
 <tr>
   <td>Message in rate</td>
-  <td>kafka.server:name=AllTopicsMessagesInPerSec,type=BrokerTopicMetrics</td>
+  <td>kafka.server:type=BrokerTopicMetrics,name=MessagesInPerSec</td>
   <td></td>
 </tr>
 <tr>
   <td>Byte in rate</td>
-  <td>kafka.server:name=AllTopicsBytesInPerSec,type=BrokerTopicMetrics</td>
+  <td>kafka.server:type=BrokerTopicMetrics,name=BytesInPerSec</td>
   <td></td>
 </tr>
 <tr>
   <td>Request rate</td>
-  <td>kafka.network:name={Produce|Fetch-consumer|Fetch-follower}-RequestsPerSec,type=RequestMetrics</td>
+  <td>kafka.network:type=RequestMetrics,name=RequestsPerSec,request={Produce|FetchConsumer|FetchFollower}</td>
   <td></td>
 </tr>
 <tr>
   <td>Byte out rate</td>
-  <td>kafka.server:name=AllTopicsBytesOutPerSec,type=BrokerTopicMetrics</td>
+  <td>kafka.server:type=BrokerTopicMetrics,name=BytesOutPerSec</td>
   <td></td>
 </tr>
 <tr>
   <td>Log flush rate and time</td>
-  <td>kafka.log:name=LogFlushRateAndTimeMs,type=LogFlushStats</td>
+  <td>kafka.log:type=LogFlushStats,name=LogFlushRateAndTimeMs</td>
   <td></td>
 </tr>
 <tr>
   <td># of under replicated partitions (|ISR| &lt; |all replicas|)</td>
-  <td>kafka.server:name=UnderReplicatedPartitions,type=ReplicaManager</td>
+  <td>kafka.server:type=ReplicaManager,name=UnderReplicatedPartitions</td>
   <td>0</td>
 </tr>
 <tr>
   <td>Is controller active on broker</td>
-  <td>kafka.controller:name=ActiveControllerCount,type=KafkaController</td>
+  <td>kafka.controller:type=KafkaController,name=ActiveControllerCount</td>
   <td>only one broker in the cluster should have 1</td>
 </tr>
 <tr>
   <td>Leader election rate</td>
-  <td>kafka.controller:name=LeaderElectionRateAndTimeMs,type=ControllerStats</td>
+  <td>kafka.controller:type=ControllerStats,name=LeaderElectionRateAndTimeMs</td>
   <td>non-zero when there are broker failures</td>
 </tr>
 <tr>
   <td>Unclean leader election rate</td>
-  <td>kafka.controller:name=UncleanLeaderElectionsPerSec,type=ControllerStats</td>
+  <td>kafka.controller:type=ControllerStats,name=UncleanLeaderElectionsPerSec</td>
   <td>0</td>
 </tr>
 <tr>
   <td>Partition counts</td>
-  <td>kafka.server:name=PartitionCount,type=ReplicaManager</td>
+  <td>kafka.server:type=ReplicaManager,name=PartitionCount</td>
   <td>mostly even across brokers</td>
 </tr>
 <tr>
   <td>Leader replica counts</td>
-  <td>kafka.server:name=LeaderCount,type=ReplicaManager</td>
+  <td>kafka.server:type=ReplicaManager,name=LeaderCount</td>
   <td>mostly even across brokers</td>
 </tr>
 <tr>
   <td>ISR shrink rate</td>
-  <td>kafka.server:name=ISRShrinksPerSec,type=ReplicaManager</td>
+  <td>kafka.server:type=ReplicaManager,name=IsrShrinksPerSec</td>
   <td>If a broker goes down, ISR for some of the partitions will
shrink. When that broker is up again, ISR will be expanded
once the replicas are fully caught up. Other than that, the
@@ -521,67 +521,67 @@ We pay particular we do graphing and ale
 </tr>
 <tr>
   <td>ISR expansion rate</td>
-  <td>kafka.server:name=ISRExpandsPerSec,type=ReplicaManager</td>
+  <td>kafka.server:type=ReplicaManager,name=IsrExpandsPerSec</td>
   <td>See above</td>
 </tr>
 <tr>
   <td>Max lag in messages btw follower and leader replicas</td>
-  <td>kafka.server:name=([-.\w]+)-MaxLag,type=ReplicaFetcherManager</td>
+  <td>kafka.server:type=ReplicaFetcherManager,name=MaxLag,clientId=Replica</td>
   <td>&lt; replica.lag.max.messages</td>
 </tr>
 <tr>
   <td>Lag in messages per follower replica</td>
-  <td>kafka.server:name=([-.\w]+)-ConsumerLag,type=FetcherLagMetrics</td>
+  <td>kafka.server:type=FetcherLagMetrics,name=ConsumerLag,clientId=([-.\w]+),topic=([-.\w]+),partition=([0-9]+)</td>
   <td>&lt; replica.lag.max.messages</td>
 </tr>
 <tr>
   <td>Requests waiting in the producer purgatory</td>
-  <td>kafka.server:name=PurgatorySize,type=ProducerRequestPurgatory</td>
+  <td>kafka.server:type=ProducerRequestPurgatory,name=PurgatorySize</td>
   <td>non-zero if ack=-1 is used</td>
 </tr>
 <tr>
   <td>Requests waiting in the fetch purgatory</td>
-  <td>kafka.server:name=PurgatorySize,type=FetchRequestPurgatory</td>
+  <td>kafka.server:type=FetchRequestPurgatory,name=PurgatorySize</td>
   <td>size depends on fetch.wait.max.ms in the consumer</td>
 </tr>
 <tr>
   <td>Request total time</td>
-  <td>kafka.network:name={Produce|Fetch-Consumer|Fetch-Follower
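
For operators updating dashboards, a hedged sketch of reading one of the renamed MBeans through the standard JMX API (object name taken from the new column above; OneMinuteRate is the usual Yammer meter attribute and is an assumption here):

    import java.lang.management.ManagementFactory
    import javax.management.ObjectName

    // In-process lookup; a remote MBeanServerConnection works the same way.
    val mbs = ManagementFactory.getPlatformMBeanServer
    val messagesIn = new ObjectName("kafka.server:type=BrokerTopicMetrics,name=MessagesInPerSec")
    val rate = mbs.getAttribute(messagesIn, "OneMinuteRate")
    println("messages in (1-min rate): " + rate)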

kafka git commit: kafka-1797; (follow-up patch) add the serializer/deserializer api to the new java client; patched by Jun Rao; reviewed by Jay Kreps

2015-01-06 Thread junrao
Repository: kafka
Updated Branches:
  refs/heads/0.8.2 53329583a -> f32e5ce24


kafka-1797; (follow-up patch) add the serializer/deserializer api to the new 
java client; patched by Jun Rao; reviewed by Jay Kreps


Project: http://git-wip-us.apache.org/repos/asf/kafka/repo
Commit: http://git-wip-us.apache.org/repos/asf/kafka/commit/f32e5ce2
Tree: http://git-wip-us.apache.org/repos/asf/kafka/tree/f32e5ce2
Diff: http://git-wip-us.apache.org/repos/asf/kafka/diff/f32e5ce2

Branch: refs/heads/0.8.2
Commit: f32e5ce247eb07ce33cfcb876771293e8a2eacdc
Parents: 5332958
Author: Jun Rao jun...@gmail.com
Authored: Tue Jan 6 11:07:46 2015 -0800
Committer: Jun Rao jun...@gmail.com
Committed: Tue Jan 6 11:07:46 2015 -0800

--
 build.gradle|  1 +
 .../clients/consumer/ByteArrayDeserializer.java | 34 --
 .../kafka/clients/consumer/ConsumerConfig.java  |  4 +-
 .../kafka/clients/consumer/Deserializer.java| 38 ---
 .../kafka/clients/consumer/KafkaConsumer.java   | 37 +--
 .../clients/producer/ByteArraySerializer.java   | 34 --
 .../kafka/clients/producer/KafkaProducer.java   | 59 ++---
 .../kafka/clients/producer/ProducerConfig.java  |  4 +-
 .../kafka/clients/producer/Serializer.java  | 38 ---
 .../kafka/common/config/AbstractConfig.java | 13 ++--
 .../common/errors/DeserializationException.java | 47 --
 .../serialization/ByteArrayDeserializer.java| 34 ++
 .../serialization/ByteArraySerializer.java  | 34 ++
 .../common/serialization/Deserializer.java  | 45 +
 .../kafka/common/serialization/Serializer.java  | 45 +
 .../serialization/StringDeserializer.java   | 50 ++
 .../common/serialization/StringSerializer.java  | 50 ++
 .../common/serialization/SerializationTest.java | 68 
 .../kafka/producer/KafkaLog4jAppender.scala |  2 +
 .../scala/kafka/tools/ConsoleProducer.scala |  2 +
 .../main/scala/kafka/tools/MirrorMaker.scala|  7 +-
 .../scala/kafka/tools/ProducerPerformance.scala |  2 +
 .../scala/kafka/tools/ReplayLogProducer.scala   |  2 +
 .../scala/kafka/tools/TestEndToEndLatency.scala |  2 +
 .../scala/kafka/tools/TestLogCleaning.scala |  2 +
 .../kafka/api/ProducerCompressionTest.scala |  2 +
 .../kafka/api/ProducerSendTest.scala| 53 +++
 .../test/scala/unit/kafka/utils/TestUtils.scala |  2 +
 28 files changed, 492 insertions(+), 219 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/kafka/blob/f32e5ce2/build.gradle
--
diff --git a/build.gradle b/build.gradle
index 18f86e4..ba52288 100644
--- a/build.gradle
+++ b/build.gradle
@@ -371,6 +371,7 @@ project(':clients') {
   javadoc {
 include "**/org/apache/kafka/clients/producer/*"
 include "**/org/apache/kafka/common/errors/*"
+include "**/org/apache/kafka/common/serialization/*"
   }
 
 }

http://git-wip-us.apache.org/repos/asf/kafka/blob/f32e5ce2/clients/src/main/java/org/apache/kafka/clients/consumer/ByteArrayDeserializer.java
--
diff --git 
a/clients/src/main/java/org/apache/kafka/clients/consumer/ByteArrayDeserializer.java
 
b/clients/src/main/java/org/apache/kafka/clients/consumer/ByteArrayDeserializer.java
deleted file mode 100644
index 514cbd2..000
--- 
a/clients/src/main/java/org/apache/kafka/clients/consumer/ByteArrayDeserializer.java
+++ /dev/null
@@ -1,34 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more 
contributor license agreements. See the NOTICE
- * file distributed with this work for additional information regarding 
copyright ownership. The ASF licenses this file
- * to You under the Apache License, Version 2.0 (the "License"); you may not 
use this file except in compliance with the
- * License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software 
distributed under the License is distributed on
- * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either 
express or implied. See the License for the
- * specific language governing permissions and limitations under the License.
- */
-
-package org.apache.kafka.clients.consumer;
-
-import java.util.Map;
-
-public class ByteArrayDeserializer implements Deserializerbyte[] {
-
-@Override
-public void configure(MapString, ? configs) {
-// nothing to do
-}
-
-@Override
-public byte[] deserialize(String topic, byte[] data, boolean isKey) {
-return data;
-}
-
-@Override
-public void close() {
-// nothing to do
-}
-}
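
Taken together, the kafka-1797 patches consolidate the serializers under org.apache.kafka.common.serialization and make the new clients generic over key and value types. A minimal sketch of configuring the 0.8.2 producer against the relocated package (the broker address and topic name below are placeholders):

    import java.util.Properties;

    import org.apache.kafka.clients.producer.KafkaProducer;
    import org.apache.kafka.clients.producer.ProducerConfig;
    import org.apache.kafka.clients.producer.ProducerRecord;

    public class NewSerializerExample {
        public static void main(String[] args) {
            Properties props = new Properties();
            props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder broker
            props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG,
                      "org.apache.kafka.common.serialization.StringSerializer");
            props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG,
                      "org.apache.kafka.common.serialization.StringSerializer");
            // the producer's type parameters now line up with the configured serializers
            KafkaProducer<String, String> producer = new KafkaProducer<String, String>(props);
            producer.send(new ProducerRecord<String, String>("test-topic", "key", "value"));
            producer.close();
        }
    }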


kafka git commit: kafka-1797; (follow-up patch) add the serializer/deserializer api to the new java client; patched by Jun Rao; reviewed by Jay Kreps

2015-01-06 Thread junrao
Repository: kafka
Updated Branches:
  refs/heads/trunk 4471dc08b -> 50b734690


kafka-1797; (follow-up patch) add the serializer/deserializer api to the new 
java client; patched by Jun Rao; reviewed by Jay Kreps


Project: http://git-wip-us.apache.org/repos/asf/kafka/repo
Commit: http://git-wip-us.apache.org/repos/asf/kafka/commit/50b73469
Tree: http://git-wip-us.apache.org/repos/asf/kafka/tree/50b73469
Diff: http://git-wip-us.apache.org/repos/asf/kafka/diff/50b73469

Branch: refs/heads/trunk
Commit: 50b734690a93e58030f69cede8d0a84d1e3f5461
Parents: 4471dc0
Author: Jun Rao jun...@gmail.com
Authored: Tue Jan 6 11:07:46 2015 -0800
Committer: Jun Rao jun...@gmail.com
Committed: Tue Jan 6 11:40:26 2015 -0800

--
 build.gradle|  1 +
 .../clients/consumer/ByteArrayDeserializer.java | 34 --
 .../kafka/clients/consumer/ConsumerConfig.java  |  4 +-
 .../kafka/clients/consumer/Deserializer.java| 38 ---
 .../kafka/clients/consumer/KafkaConsumer.java   | 37 +--
 .../clients/producer/ByteArraySerializer.java   | 34 --
 .../kafka/clients/producer/KafkaProducer.java   | 59 ++---
 .../kafka/clients/producer/ProducerConfig.java  |  4 +-
 .../kafka/clients/producer/Serializer.java  | 38 ---
 .../kafka/common/config/AbstractConfig.java | 13 ++--
 .../common/errors/DeserializationException.java | 47 --
 .../serialization/ByteArrayDeserializer.java| 34 ++
 .../serialization/ByteArraySerializer.java  | 34 ++
 .../common/serialization/Deserializer.java  | 45 +
 .../kafka/common/serialization/Serializer.java  | 44 +
 .../serialization/StringDeserializer.java   | 50 ++
 .../common/serialization/StringSerializer.java  | 50 ++
 .../common/serialization/SerializationTest.java | 68 
 .../kafka/producer/KafkaLog4jAppender.scala |  2 +
 .../scala/kafka/tools/ConsoleProducer.scala |  2 +
 .../main/scala/kafka/tools/MirrorMaker.scala| 10 +--
 .../scala/kafka/tools/ProducerPerformance.scala |  2 +
 .../scala/kafka/tools/ReplayLogProducer.scala   |  2 +
 .../scala/kafka/tools/TestEndToEndLatency.scala |  2 +
 .../scala/kafka/tools/TestLogCleaning.scala |  2 +
 .../kafka/api/ProducerCompressionTest.scala |  2 +
 .../kafka/api/ProducerSendTest.scala| 53 +++
 .../test/scala/unit/kafka/utils/TestUtils.scala |  2 +
 28 files changed, 492 insertions(+), 221 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/kafka/blob/50b73469/build.gradle
--
diff --git a/build.gradle b/build.gradle
index 18f86e4..ba52288 100644
--- a/build.gradle
+++ b/build.gradle
@@ -371,6 +371,7 @@ project(':clients') {
   javadoc {
     include "**/org/apache/kafka/clients/producer/*"
     include "**/org/apache/kafka/common/errors/*"
+    include "**/org/apache/kafka/common/serialization/*"
   }
 
 }

http://git-wip-us.apache.org/repos/asf/kafka/blob/50b73469/clients/src/main/java/org/apache/kafka/clients/consumer/ByteArrayDeserializer.java
--
diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/ByteArrayDeserializer.java b/clients/src/main/java/org/apache/kafka/clients/consumer/ByteArrayDeserializer.java
deleted file mode 100644
index 514cbd2..000
--- a/clients/src/main/java/org/apache/kafka/clients/consumer/ByteArrayDeserializer.java
+++ /dev/null
@@ -1,34 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE
- * file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file
- * to You under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the
- * License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
- * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations under the License.
- */
-
-package org.apache.kafka.clients.consumer;
-
-import java.util.Map;
-
-public class ByteArrayDeserializer implements Deserializer<byte[]> {
-
-    @Override
-    public void configure(Map<String, ?> configs) {
-        // nothing to do
-    }
-
-    @Override
-    public byte[] deserialize(String topic, byte[] data, boolean isKey) {
-        return data;
-    }
-
-    @Override
-    public void close() {
-        // nothing to do
-    }
-}


kafka git commit: kafka-1797; (delta follow-up patch) add the serializer/deserializer api to the new java client; patched by Jun Rao; reviewed by Neha Narkhede

2015-01-06 Thread junrao
Repository: kafka
Updated Branches:
  refs/heads/trunk 50b734690 -> 517503db2


kafka-1797; (delta follow-up patch) add the serializer/deserializer api to the 
new java client; patched by Jun Rao; reviewed by Neha Narkhede


Project: http://git-wip-us.apache.org/repos/asf/kafka/repo
Commit: http://git-wip-us.apache.org/repos/asf/kafka/commit/517503db
Tree: http://git-wip-us.apache.org/repos/asf/kafka/tree/517503db
Diff: http://git-wip-us.apache.org/repos/asf/kafka/diff/517503db

Branch: refs/heads/trunk
Commit: 517503db2616531b08ee4d08d39c0e1c0bd19e97
Parents: 50b7346
Author: Jun Rao jun...@gmail.com
Authored: Tue Jan 6 12:10:04 2015 -0800
Committer: Jun Rao jun...@gmail.com
Committed: Tue Jan 6 12:10:04 2015 -0800

--
 .../org/apache/kafka/clients/producer/KafkaProducer.java | 8 ++--
 1 file changed, 6 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/kafka/blob/517503db/clients/src/main/java/org/apache/kafka/clients/producer/KafkaProducer.java
--
diff --git a/clients/src/main/java/org/apache/kafka/clients/producer/KafkaProducer.java b/clients/src/main/java/org/apache/kafka/clients/producer/KafkaProducer.java
index 91c672d..a61c56c 100644
--- a/clients/src/main/java/org/apache/kafka/clients/producer/KafkaProducer.java
+++ b/clients/src/main/java/org/apache/kafka/clients/producer/KafkaProducer.java
@@ -76,6 +76,7 @@ public class KafkaProducer<K,V> implements Producer<K,V> {
     private final Time time;
     private final Serializer<K> keySerializer;
     private final Serializer<V> valueSerializer;
+    private final ProducerConfig producerConfig;
 
     /**
      * A producer is instantiated by providing a set of key-value pairs as configuration. Valid configuration strings
@@ -152,6 +153,7 @@ public class KafkaProducer<K,V> implements Producer<K,V> {
 
     private KafkaProducer(ProducerConfig config, Serializer<K> keySerializer, Serializer<V> valueSerializer) {
         log.trace("Starting the Kafka producer");
+        this.producerConfig = config;
         this.time = new SystemTime();
         MetricConfig metricConfig = new MetricConfig().samples(config.getInt(ProducerConfig.METRICS_NUM_SAMPLES_CONFIG))
                                                       .timeWindow(config.getLong(ProducerConfig.METRICS_SAMPLE_WINDOW_MS_CONFIG),
@@ -307,14 +309,16 @@ public class KafkaProducer<K,V> implements Producer<K,V> {
             serializedKey = keySerializer.serialize(record.topic(), record.key());
         } catch (ClassCastException cce) {
             throw new SerializationException("Can't convert key of class " + record.key().getClass().getName() +
-                    " to the one specified in key.serializer");
+                    " to class " + producerConfig.getClass(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG).getName() +
+                    " specified in key.serializer");
         }
         byte[] serializedValue;
         try {
             serializedValue = valueSerializer.serialize(record.topic(), record.value());
         } catch (ClassCastException cce) {
             throw new SerializationException("Can't convert value of class " + record.value().getClass().getName() +
-                    " to the one specified in value.serializer");
+                    " to class " + producerConfig.getClass(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG).getName() +
+                    " specified in value.serializer");
         }
         ProducerRecord serializedRecord = new ProducerRecord<byte[], byte[]>(record.topic(), record.partition(), serializedKey, serializedValue);
         int partition = partitioner.partition(serializedRecord, metadata.fetch());
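
The improved message now names the configured serializer class rather than just saying "the one specified". A sketch of the mismatch that triggers it, assuming a reachable broker (the address and topic are placeholders):

    import java.util.Properties;

    import org.apache.kafka.clients.producer.KafkaProducer;
    import org.apache.kafka.clients.producer.ProducerConfig;
    import org.apache.kafka.clients.producer.ProducerRecord;
    import org.apache.kafka.common.errors.SerializationException;

    public class SerializerMismatchExample {
        public static void main(String[] args) {
            Properties props = new Properties();
            props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder broker
            props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG,
                      "org.apache.kafka.common.serialization.ByteArraySerializer");
            props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG,
                      "org.apache.kafka.common.serialization.ByteArraySerializer");
            // byte[] serializers are configured, but String records are handed in
            KafkaProducer<String, String> producer = new KafkaProducer<String, String>(props);
            try {
                producer.send(new ProducerRecord<String, String>("test-topic", "k", "v"));
            } catch (SerializationException e) {
                // e.g. "Can't convert key of class java.lang.String to class
                // org.apache.kafka.common.serialization.ByteArraySerializer specified in key.serializer"
                System.err.println(e.getMessage());
            } finally {
                producer.close();
            }
        }
    }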



kafka git commit: kafka-1797; (delta follow-up patch) add the serializer/deserializer api to the new java client; patched by Jun Rao; reviewed by Neha Narkhede

2015-01-06 Thread junrao
Repository: kafka
Updated Branches:
  refs/heads/0.8.2 f32e5ce24 -> fec9f32da


kafka-1797; (delta follow-up patch) add the serializer/deserializer api to the 
new java client; patched by Jun Rao; reviewed by Neha Narkhede


Project: http://git-wip-us.apache.org/repos/asf/kafka/repo
Commit: http://git-wip-us.apache.org/repos/asf/kafka/commit/fec9f32d
Tree: http://git-wip-us.apache.org/repos/asf/kafka/tree/fec9f32d
Diff: http://git-wip-us.apache.org/repos/asf/kafka/diff/fec9f32d

Branch: refs/heads/0.8.2
Commit: fec9f32dafaaada396a045569c20e625e2a114e3
Parents: f32e5ce
Author: Jun Rao jun...@gmail.com
Authored: Tue Jan 6 12:52:39 2015 -0800
Committer: Jun Rao jun...@gmail.com
Committed: Tue Jan 6 12:52:39 2015 -0800

--
 .../org/apache/kafka/clients/producer/KafkaProducer.java | 8 ++--
 1 file changed, 6 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/kafka/blob/fec9f32d/clients/src/main/java/org/apache/kafka/clients/producer/KafkaProducer.java
--
diff --git a/clients/src/main/java/org/apache/kafka/clients/producer/KafkaProducer.java b/clients/src/main/java/org/apache/kafka/clients/producer/KafkaProducer.java
index db23a12..3053f27 100644
--- a/clients/src/main/java/org/apache/kafka/clients/producer/KafkaProducer.java
+++ b/clients/src/main/java/org/apache/kafka/clients/producer/KafkaProducer.java
@@ -76,6 +76,7 @@ public class KafkaProducer<K,V> implements Producer<K,V> {
     private final Time time;
     private final Serializer<K> keySerializer;
     private final Serializer<V> valueSerializer;
+    private final ProducerConfig producerConfig;
 
     /**
      * A producer is instantiated by providing a set of key-value pairs as configuration. Valid configuration strings
@@ -152,6 +153,7 @@ public class KafkaProducer<K,V> implements Producer<K,V> {
 
     private KafkaProducer(ProducerConfig config, Serializer<K> keySerializer, Serializer<V> valueSerializer) {
         log.trace("Starting the Kafka producer");
+        this.producerConfig = config;
         this.time = new SystemTime();
         MetricConfig metricConfig = new MetricConfig().samples(config.getInt(ProducerConfig.METRICS_NUM_SAMPLES_CONFIG))
                                                       .timeWindow(config.getLong(ProducerConfig.METRICS_SAMPLE_WINDOW_MS_CONFIG),
@@ -307,14 +309,16 @@ public class KafkaProducer<K,V> implements Producer<K,V> {
             serializedKey = keySerializer.serialize(record.topic(), record.key());
         } catch (ClassCastException cce) {
             throw new SerializationException("Can't convert key of class " + record.key().getClass().getName() +
-                    " to the one specified in key.serializer");
+                    " to class " + producerConfig.getClass(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG).getName() +
+                    " specified in key.serializer");
         }
         byte[] serializedValue;
         try {
             serializedValue = valueSerializer.serialize(record.topic(), record.value());
         } catch (ClassCastException cce) {
             throw new SerializationException("Can't convert value of class " + record.value().getClass().getName() +
-                    " to the one specified in value.serializer");
+                    " to class " + producerConfig.getClass(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG).getName() +
+                    " specified in value.serializer");
         }
         ProducerRecord serializedRecord = new ProducerRecord<byte[], byte[]>(record.topic(), record.partition(), serializedKey, serializedValue);
         int partition = partitioner.partition(serializedRecord, metadata.fetch());



kafka git commit: kafka-1711; WARN Property topic is not valid when running console producer; patched by Joe Crobak; reviewed by Jun Rao

2015-01-13 Thread junrao
Repository: kafka
Updated Branches:
  refs/heads/0.8.2 432d397af -> 4e584deeb


kafka-1711; WARN Property topic is not valid when running console producer; 
patched by Joe Crobak; reviewed by Jun Rao


Project: http://git-wip-us.apache.org/repos/asf/kafka/repo
Commit: http://git-wip-us.apache.org/repos/asf/kafka/commit/4e584dee
Tree: http://git-wip-us.apache.org/repos/asf/kafka/tree/4e584dee
Diff: http://git-wip-us.apache.org/repos/asf/kafka/diff/4e584dee

Branch: refs/heads/0.8.2
Commit: 4e584deeb652d9bc0b606f710e2bb137392f6f5f
Parents: 432d397
Author: Joe Crobak joec...@gmail.com
Authored: Tue Jan 13 09:15:22 2015 -0800
Committer: Jun Rao jun...@gmail.com
Committed: Tue Jan 13 09:15:22 2015 -0800

--
 core/src/main/scala/kafka/tools/ConsoleProducer.scala | 6 --
 1 file changed, 4 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/kafka/blob/4e584dee/core/src/main/scala/kafka/tools/ConsoleProducer.scala
--
diff --git a/core/src/main/scala/kafka/tools/ConsoleProducer.scala b/core/src/main/scala/kafka/tools/ConsoleProducer.scala
index a680b62..2a8e981 100644
--- a/core/src/main/scala/kafka/tools/ConsoleProducer.scala
+++ b/core/src/main/scala/kafka/tools/ConsoleProducer.scala
@@ -35,9 +35,11 @@ object ConsoleProducer {
     val config = new ProducerConfig(args)
     val reader = Class.forName(config.readerClass).newInstance().asInstanceOf[MessageReader]
     val props = new Properties
-    props.put("topic", config.topic)
     props.putAll(config.cmdLineProps)
-    reader.init(System.in, props)
+
+    val readerProps = new Properties(props)
+    readerProps.put("topic", config.topic)
+    reader.init(System.in, readerProps)
 
     try {
         val producer =
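
The fix leans on java.util.Properties defaults: a Properties built with a defaults argument reads through to it on lookups, but puts on the wrapper never leak back into the defaults. A small sketch of that behavior (the property names are placeholders):

    import java.util.Properties;

    public class ReaderPropsSketch {
        public static void main(String[] args) {
            Properties producerProps = new Properties();
            producerProps.put("compression.codec", "1"); // stands in for cmdLineProps

            // readerProps falls back to producerProps for lookups...
            Properties readerProps = new Properties(producerProps);
            readerProps.put("topic", "test-topic");

            System.out.println(readerProps.getProperty("compression.codec")); // "1", via defaults
            System.out.println(readerProps.getProperty("topic"));             // "test-topic"
            // ...but "topic" never reaches producerProps, so the producer
            // config validation no longer warns that "topic" is not valid
            System.out.println(producerProps.getProperty("topic"));           // null
        }
    }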



kafka git commit: trivial change to add byte serializer to ProducerPerformance; patched by Jun Rao

2015-01-13 Thread junrao
Repository: kafka
Updated Branches:
  refs/heads/trunk e79ebdfe2 -> bfb2da3c8


trivial change to add byte serializer to ProducerPerformance; patched by Jun Rao


Project: http://git-wip-us.apache.org/repos/asf/kafka/repo
Commit: http://git-wip-us.apache.org/repos/asf/kafka/commit/bfb2da3c
Tree: http://git-wip-us.apache.org/repos/asf/kafka/tree/bfb2da3c
Diff: http://git-wip-us.apache.org/repos/asf/kafka/diff/bfb2da3c

Branch: refs/heads/trunk
Commit: bfb2da3c82ab01f5914d5559ee196d5aa977b17c
Parents: e79ebdf
Author: Jun Rao jun...@gmail.com
Authored: Tue Jan 13 09:54:32 2015 -0800
Committer: Jun Rao jun...@gmail.com
Committed: Tue Jan 13 09:54:54 2015 -0800

--
 .../org/apache/kafka/clients/tools/ProducerPerformance.java   | 7 +++
 1 file changed, 3 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/kafka/blob/bfb2da3c/clients/src/main/java/org/apache/kafka/clients/tools/ProducerPerformance.java
--
diff --git a/clients/src/main/java/org/apache/kafka/clients/tools/ProducerPerformance.java b/clients/src/main/java/org/apache/kafka/clients/tools/ProducerPerformance.java
index 1b82800..689bae9 100644
--- a/clients/src/main/java/org/apache/kafka/clients/tools/ProducerPerformance.java
+++ b/clients/src/main/java/org/apache/kafka/clients/tools/ProducerPerformance.java
@@ -15,10 +15,7 @@ package org.apache.kafka.clients.tools;
 import java.util.Arrays;
 import java.util.Properties;
 
-import org.apache.kafka.clients.producer.Callback;
-import org.apache.kafka.clients.producer.KafkaProducer;
-import org.apache.kafka.clients.producer.ProducerRecord;
-import org.apache.kafka.clients.producer.RecordMetadata;
+import org.apache.kafka.clients.producer.*;
 
 public class ProducerPerformance {
 
@@ -46,6 +43,8 @@ public class ProducerPerformance {
                 throw new IllegalArgumentException("Invalid property: " + args[i]);
             props.put(pieces[0], pieces[1]);
         }
+        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.ByteArraySerializer");
+        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.ByteArraySerializer");
         KafkaProducer<byte[], byte[]> producer = new KafkaProducer<byte[],byte[]>(props);
 
         /* setup perf test */



kafka git commit: trivial change to add byte serializer to ProducerPerformance; patched by Jun Rao

2015-01-13 Thread junrao
Repository: kafka
Updated Branches:
  refs/heads/0.8.2 4e584deeb -> d4629df1c


trivial change to add byte serializer to ProducerPerformance; patched by Jun Rao


Project: http://git-wip-us.apache.org/repos/asf/kafka/repo
Commit: http://git-wip-us.apache.org/repos/asf/kafka/commit/d4629df1
Tree: http://git-wip-us.apache.org/repos/asf/kafka/tree/d4629df1
Diff: http://git-wip-us.apache.org/repos/asf/kafka/diff/d4629df1

Branch: refs/heads/0.8.2
Commit: d4629df1c25efe06a9e54ed96375884c5de69442
Parents: 4e584de
Author: Jun Rao jun...@gmail.com
Authored: Tue Jan 13 09:50:30 2015 -0800
Committer: Jun Rao jun...@gmail.com
Committed: Tue Jan 13 09:50:30 2015 -0800

--
 .../org/apache/kafka/clients/tools/ProducerPerformance.java   | 7 +++
 1 file changed, 3 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/kafka/blob/d4629df1/clients/src/main/java/org/apache/kafka/clients/tools/ProducerPerformance.java
--
diff --git a/clients/src/main/java/org/apache/kafka/clients/tools/ProducerPerformance.java b/clients/src/main/java/org/apache/kafka/clients/tools/ProducerPerformance.java
index 3a8d5f4..0eff203 100644
--- a/clients/src/main/java/org/apache/kafka/clients/tools/ProducerPerformance.java
+++ b/clients/src/main/java/org/apache/kafka/clients/tools/ProducerPerformance.java
@@ -15,10 +15,7 @@ package org.apache.kafka.clients.tools;
 import java.util.Arrays;
 import java.util.Properties;
 
-import org.apache.kafka.clients.producer.Callback;
-import org.apache.kafka.clients.producer.KafkaProducer;
-import org.apache.kafka.clients.producer.ProducerRecord;
-import org.apache.kafka.clients.producer.RecordMetadata;
+import org.apache.kafka.clients.producer.*;
 
 public class ProducerPerformance {
 
@@ -46,6 +43,8 @@ public class ProducerPerformance {
                 throw new IllegalArgumentException("Invalid property: " + args[i]);
             props.put(pieces[0], pieces[1]);
         }
+        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.ByteArraySerializer");
+        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.ByteArraySerializer");
         KafkaProducer<byte[], byte[]> producer = new KafkaProducer<byte[], byte[]>(props);
 
         /* setup perf test */



Git Push Summary

2015-01-13 Thread junrao
Repository: kafka
Updated Tags:  refs/tags/0.8.2.0 [deleted] b1565f1a1


kafka git commit: change build version to 0.8.2.0

2015-01-13 Thread junrao
Repository: kafka
Updated Branches:
  refs/heads/0.8.2 d4629df1c -> a643be62a


change build version to 0.8.2.0


Project: http://git-wip-us.apache.org/repos/asf/kafka/repo
Commit: http://git-wip-us.apache.org/repos/asf/kafka/commit/a643be62
Tree: http://git-wip-us.apache.org/repos/asf/kafka/tree/a643be62
Diff: http://git-wip-us.apache.org/repos/asf/kafka/diff/a643be62

Branch: refs/heads/0.8.2
Commit: a643be62afb23bca15ed2e88db9e76cb0ebfa906
Parents: d4629df
Author: Jun Rao jun...@gmail.com
Authored: Tue Jan 13 17:44:37 2015 -0800
Committer: Jun Rao jun...@gmail.com
Committed: Tue Jan 13 17:44:37 2015 -0800

--
 gradle.properties | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/kafka/blob/a643be62/gradle.properties
--
diff --git a/gradle.properties b/gradle.properties
index 7cbd3de..773ed80 100644
--- a/gradle.properties
+++ b/gradle.properties
@@ -14,7 +14,7 @@
 # limitations under the License.
 
 group=org.apache.kafka
-version=0.8.2-beta
+version=0.8.2.0
 scalaVersion=2.10.4
 task=build
 org.gradle.jvmargs=-XX:MaxPermSize=512m -Xmx1024m



Git Push Summary

2015-01-13 Thread junrao
Repository: kafka
Updated Tags:  refs/tags/0.8.2.0 [created] b0c7d579f


Git Push Summary

2015-01-13 Thread junrao
Repository: kafka
Updated Tags:  refs/tags/0.8.2.0 [created] b1565f1a1


kafka git commit: KAFKA-1841; OffsetCommitRequest API - timestamp field is not versioned; patched by Jun Rao; reviewed by Joel Koshy

2015-01-12 Thread junrao
Repository: kafka
Updated Branches:
  refs/heads/0.8.2 9b6744d3a -> 432d397af


KAFKA-1841; OffsetCommitRequest API - timestamp field is not versioned; patched 
by Jun Rao; reviewed by Joel Koshy


Project: http://git-wip-us.apache.org/repos/asf/kafka/repo
Commit: http://git-wip-us.apache.org/repos/asf/kafka/commit/432d397a
Tree: http://git-wip-us.apache.org/repos/asf/kafka/tree/432d397a
Diff: http://git-wip-us.apache.org/repos/asf/kafka/diff/432d397a

Branch: refs/heads/0.8.2
Commit: 432d397af8a1d4467fe8041bcff8790864010a80
Parents: 9b6744d
Author: Jun Rao jun...@gmail.com
Authored: Mon Jan 12 22:32:31 2015 -0800
Committer: Jun Rao jun...@gmail.com
Committed: Mon Jan 12 22:32:31 2015 -0800

--
 .../apache/kafka/common/protocol/Protocol.java  | 36 +++-
 .../common/requests/OffsetCommitRequest.java| 22 -
 .../scala/kafka/api/OffsetCommitRequest.scala   | 21 +++--
 .../scala/kafka/api/OffsetCommitResponse.scala  |  5 +-
 .../scala/kafka/api/OffsetFetchRequest.scala|  4 +-
 .../kafka/common/OffsetMetadataAndError.scala   |  2 +-
 .../src/main/scala/kafka/server/KafkaApis.scala | 96 
 .../api/RequestResponseSerializationTest.scala  |  4 +-
 8 files changed, 151 insertions(+), 39 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/kafka/blob/432d397a/clients/src/main/java/org/apache/kafka/common/protocol/Protocol.java
--
diff --git a/clients/src/main/java/org/apache/kafka/common/protocol/Protocol.java b/clients/src/main/java/org/apache/kafka/common/protocol/Protocol.java
index 7517b87..f0a262e 100644
--- a/clients/src/main/java/org/apache/kafka/common/protocol/Protocol.java
+++ b/clients/src/main/java/org/apache/kafka/common/protocol/Protocol.java
@@ -111,6 +111,16 @@ public class Protocol {
                                                                      new Field("offset",
                                                                                INT64,
                                                                                "Message offset to be committed."),
+                                                                     new Field("metadata",
+                                                                               STRING,
+                                                                               "Any associated metadata the client wants to keep."));
+
+    public static Schema OFFSET_COMMIT_REQUEST_PARTITION_V1 = new Schema(new Field("partition",
+                                                                                   INT32,
+                                                                                   "Topic partition id."),
+                                                                         new Field("offset",
+                                                                                   INT64,
+                                                                                   "Message offset to be committed."),
                                                                      new Field("timestamp",
                                                                                INT64,
                                                                                "Timestamp of the commit"),
@@ -125,6 +135,13 @@ public class Protocol {
                                                                  new ArrayOf(OFFSET_COMMIT_REQUEST_PARTITION_V0),
                                                                  "Partitions to commit offsets."));
 
+    public static Schema OFFSET_COMMIT_REQUEST_TOPIC_V1 = new Schema(new Field("topic",
+                                                                               STRING,
+                                                                               "Topic to commit."),
+                                                                     new Field("partitions",
+                                                                               new ArrayOf(OFFSET_COMMIT_REQUEST_PARTITION_V1),
+                                                                               "Partitions to commit offsets."));
+
     public static Schema OFFSET_COMMIT_REQUEST_V0 = new Schema(new Field("group_id",
                                                                          STRING,
                                                                          "The consumer group id."),
@@ -142,7 +159,7 @@ public class Protocol {
                                                                STRING,
                                                                "The consumer id assigned by the group coordinator."),
                                                      new
kafka git commit: kafka-1797; (missed parametric in a few files) add the serializer/deserializer api to the new java client; patched by Jun Rao

2015-01-12 Thread junrao
Repository: kafka
Updated Branches:
  refs/heads/0.8.2 8d5d45904 -> 828b808f9


kafka-1797; (missed parametric in a few files) add the serializer/deserializer 
api to the new java client; patched by Jun Rao


Project: http://git-wip-us.apache.org/repos/asf/kafka/repo
Commit: http://git-wip-us.apache.org/repos/asf/kafka/commit/828b808f
Tree: http://git-wip-us.apache.org/repos/asf/kafka/tree/828b808f
Diff: http://git-wip-us.apache.org/repos/asf/kafka/diff/828b808f

Branch: refs/heads/0.8.2
Commit: 828b808f98afcd0c670d144af07dc14fcf743edc
Parents: 8d5d459
Author: Jun Rao jun...@gmail.com
Authored: Mon Jan 12 21:21:16 2015 -0800
Committer: Jun Rao jun...@gmail.com
Committed: Mon Jan 12 21:37:00 2015 -0800

--
 .../kafka/clients/producer/MockProducer.java|  6 ++---
 .../clients/tools/ProducerPerformance.java  |  4 +--
 .../clients/producer/MockProducerTest.java  |  9 +++
 .../kafka/clients/producer/PartitionerTest.java | 27 +---
 .../kafka/api/ProducerFailureHandlingTest.scala |  2 +-
 5 files changed, 21 insertions(+), 27 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/kafka/blob/828b808f/clients/src/main/java/org/apache/kafka/clients/producer/MockProducer.java
--
diff --git a/clients/src/main/java/org/apache/kafka/clients/producer/MockProducer.java b/clients/src/main/java/org/apache/kafka/clients/producer/MockProducer.java
index c0f1d57..00d1300 100644
--- a/clients/src/main/java/org/apache/kafka/clients/producer/MockProducer.java
+++ b/clients/src/main/java/org/apache/kafka/clients/producer/MockProducer.java
@@ -40,7 +40,7 @@ import org.apache.kafka.common.TopicPartition;
  * By default this mock will synchronously complete each send call successfully. However it can be configured to allow
  * the user to control the completion of the call and supply an optional error for the producer to throw.
  */
-public class MockProducer implements Producer {
+public class MockProducer implements Producer<byte[],byte[]> {
 
     private final Cluster cluster;
     private final Partitioner partitioner = new Partitioner();
@@ -90,7 +90,7 @@ public class MockProducer implements Producer {
      * @see #history()
      */
     @Override
-    public synchronized Future<RecordMetadata> send(ProducerRecord record) {
+    public synchronized Future<RecordMetadata> send(ProducerRecord<byte[], byte[]> record) {
         return send(record, null);
     }
 
@@ -100,7 +100,7 @@ public class MockProducer implements Producer {
      * @see #history()
      */
     @Override
-    public synchronized Future<RecordMetadata> send(ProducerRecord record, Callback callback) {
+    public synchronized Future<RecordMetadata> send(ProducerRecord<byte[],byte[]> record, Callback callback) {
         int partition = 0;
         if (this.cluster.partitionsForTopic(record.topic()) != null)
             partition = partitioner.partition(record, this.cluster);

http://git-wip-us.apache.org/repos/asf/kafka/blob/828b808f/clients/src/main/java/org/apache/kafka/clients/tools/ProducerPerformance.java
--
diff --git a/clients/src/main/java/org/apache/kafka/clients/tools/ProducerPerformance.java b/clients/src/main/java/org/apache/kafka/clients/tools/ProducerPerformance.java
index ac86150..3a8d5f4 100644
--- a/clients/src/main/java/org/apache/kafka/clients/tools/ProducerPerformance.java
+++ b/clients/src/main/java/org/apache/kafka/clients/tools/ProducerPerformance.java
@@ -46,12 +46,12 @@ public class ProducerPerformance {
                 throw new IllegalArgumentException("Invalid property: " + args[i]);
             props.put(pieces[0], pieces[1]);
         }
-        KafkaProducer producer = new KafkaProducer(props);
+        KafkaProducer<byte[], byte[]> producer = new KafkaProducer<byte[], byte[]>(props);
 
         /* setup perf test */
         byte[] payload = new byte[recordSize];
         Arrays.fill(payload, (byte) 1);
-        ProducerRecord record = new ProducerRecord(topicName, payload);
+        ProducerRecord<byte[], byte[]> record = new ProducerRecord<byte[], byte[]>(topicName, payload);
         long sleepTime = NS_PER_SEC / throughput;
         long sleepDeficitNs = 0;
         Stats stats = new Stats(numRecords, 5000);

http://git-wip-us.apache.org/repos/asf/kafka/blob/828b808f/clients/src/test/java/org/apache/kafka/clients/producer/MockProducerTest.java
--
diff --git a/clients/src/test/java/org/apache/kafka/clients/producer/MockProducerTest.java b/clients/src/test/java/org/apache/kafka/clients/producer/MockProducerTest.java
index 9a9411f..3676b05 100644
--- a/clients/src/test/java/org/apache/kafka/clients/producer/MockProducerTest.java
+++ b/clients/src/test/java/org/apache/kafka/clients/producer/MockProducerTest.java

[2/2] kafka git commit: KAFKA-1723; make the metrics name in new producer more standard; patched by Manikumar Reddy; reviewed by Jay Kreps and Jun Rao

2015-01-12 Thread junrao
KAFKA-1723; make the metrics name in new producer more standard; patched by 
Manikumar Reddy; reviewed by Jay Kreps and Jun Rao


Project: http://git-wip-us.apache.org/repos/asf/kafka/repo
Commit: http://git-wip-us.apache.org/repos/asf/kafka/commit/688e38ce
Tree: http://git-wip-us.apache.org/repos/asf/kafka/tree/688e38ce
Diff: http://git-wip-us.apache.org/repos/asf/kafka/diff/688e38ce

Branch: refs/heads/trunk
Commit: 688e38ce45a7358a1e0bb359aa9b1a698a841619
Parents: 6f4dea9
Author: Manikumar Reddy manikumar.re...@gmail.com
Authored: Mon Jan 12 22:01:43 2015 -0800
Committer: Jun Rao jun...@gmail.com
Committed: Mon Jan 12 22:02:02 2015 -0800

--
 build.gradle|   1 +
 .../apache/kafka/clients/consumer/Consumer.java |   5 +-
 .../kafka/clients/consumer/KafkaConsumer.java   |   3 +-
 .../kafka/clients/consumer/MockConsumer.java|   3 +-
 .../kafka/clients/producer/KafkaProducer.java   |  19 +-
 .../kafka/clients/producer/MockProducer.java|   7 +-
 .../apache/kafka/clients/producer/Producer.java |   4 +-
 .../clients/producer/internals/BufferPool.java  |  28 +--
 .../producer/internals/RecordAccumulator.java   |  25 +--
 .../clients/producer/internals/Sender.java  |  81 ++---
 .../java/org/apache/kafka/common/Metric.java|   9 +-
 .../org/apache/kafka/common/MetricName.java | 179 +++
 .../kafka/common/metrics/CompoundStat.java  |  14 +-
 .../kafka/common/metrics/JmxReporter.java   |  60 ---
 .../kafka/common/metrics/KafkaMetric.java   |  18 +-
 .../apache/kafka/common/metrics/Metrics.java|  58 ++
 .../org/apache/kafka/common/metrics/Sensor.java |  51 ++
 .../kafka/common/metrics/stats/Percentile.java  |  18 +-
 .../kafka/common/metrics/stats/Percentiles.java |   2 +-
 .../apache/kafka/common/network/Selector.java   |  93 ++
 .../kafka/clients/producer/BufferPoolTest.java  |  14 +-
 .../clients/producer/RecordAccumulatorTest.java |  16 +-
 .../kafka/clients/producer/SenderTest.java  |  11 +-
 .../kafka/common/metrics/JmxReporterTest.java   |  13 +-
 .../kafka/common/metrics/MetricsTest.java   |  87 +
 .../kafka/common/network/SelectorTest.java  |   3 +-
 .../org/apache/kafka/test/MetricsBench.java |  11 +-
 27 files changed, 533 insertions(+), 300 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/kafka/blob/688e38ce/build.gradle
--
diff --git a/build.gradle b/build.gradle
index ba52288..c9ac433 100644
--- a/build.gradle
+++ b/build.gradle
@@ -370,6 +370,7 @@ project(':clients') {
 
   javadoc {
     include "**/org/apache/kafka/clients/producer/*"
+    include "**/org/apache/kafka/common/*"
     include "**/org/apache/kafka/common/errors/*"
     include "**/org/apache/kafka/common/serialization/*"
   }

http://git-wip-us.apache.org/repos/asf/kafka/blob/688e38ce/clients/src/main/java/org/apache/kafka/clients/consumer/Consumer.java
--
diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/Consumer.java b/clients/src/main/java/org/apache/kafka/clients/consumer/Consumer.java
index 1bce501..c0c636b 100644
--- a/clients/src/main/java/org/apache/kafka/clients/consumer/Consumer.java
+++ b/clients/src/main/java/org/apache/kafka/clients/consumer/Consumer.java
@@ -18,6 +18,7 @@ import java.util.Map;
 
 import org.apache.kafka.common.Metric;
 import org.apache.kafka.common.TopicPartition;
+import org.apache.kafka.common.MetricName;
 
 /**
  * @see KafkaConsumer
@@ -111,11 +112,11 @@ public interface Consumer<K,V> extends Closeable {
      * @return The offsets for messages that were written to the server before the specified timestamp.
      */
     public Map<TopicPartition, Long> offsetsBeforeTime(long timestamp, Collection<TopicPartition> partitions);
-    
+
     /**
      * Return a map of metrics maintained by the consumer
     */
-    public Map<String, ? extends Metric> metrics();
+    public Map<MetricName, ? extends Metric> metrics();
 
     /**
      * Close this consumer

http://git-wip-us.apache.org/repos/asf/kafka/blob/688e38ce/clients/src/main/java/org/apache/kafka/clients/consumer/KafkaConsumer.java
--
diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/KafkaConsumer.java b/clients/src/main/java/org/apache/kafka/clients/consumer/KafkaConsumer.java
index a5fedce..76efc21 100644
--- a/clients/src/main/java/org/apache/kafka/clients/consumer/KafkaConsumer.java
+++ b/clients/src/main/java/org/apache/kafka/clients/consumer/KafkaConsumer.java
@@ -16,6 +16,7 @@ import org.apache.kafka.common.Metric;
 import org.apache.kafka.common.TopicPartition;
 import org.apache.kafka.common.metrics.JmxReporter;
 import
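
After this patch, client metrics are keyed by MetricName (group, name, description, and tags) instead of a flat string. A sketch of walking a producer's metrics under the new API, reusing the serializer configuration from the earlier examples (the broker address is a placeholder, and the accessor names follow the MetricName/Metric classes added here):

    import java.util.Map;
    import java.util.Properties;

    import org.apache.kafka.clients.producer.KafkaProducer;
    import org.apache.kafka.clients.producer.ProducerConfig;
    import org.apache.kafka.common.Metric;
    import org.apache.kafka.common.MetricName;

    public class MetricNameSketch {
        public static void main(String[] args) {
            Properties props = new Properties();
            props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder broker
            props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG,
                      "org.apache.kafka.common.serialization.ByteArraySerializer");
            props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG,
                      "org.apache.kafka.common.serialization.ByteArraySerializer");
            KafkaProducer<byte[], byte[]> producer = new KafkaProducer<byte[], byte[]>(props);

            // metrics() now returns Map<MetricName, ? extends Metric>
            Map<MetricName, ? extends Metric> metrics = producer.metrics();
            for (Metric metric : metrics.values()) {
                MetricName name = metric.metricName();
                System.out.println(name.group() + " / " + name.name() + " = " + metric.value());
            }
            producer.close();
        }
    }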

[2/2] kafka git commit: KAFKA-1723; make the metrics name in new producer more standard; patched by Manikumar Reddy; reviewed by Jay Kreps and Jun Rao

2015-01-12 Thread junrao
KAFKA-1723; make the metrics name in new producer more standard; patched by 
Manikumar Reddy; reviewed by Jay Kreps and Jun Rao


Project: http://git-wip-us.apache.org/repos/asf/kafka/repo
Commit: http://git-wip-us.apache.org/repos/asf/kafka/commit/9b6744d3
Tree: http://git-wip-us.apache.org/repos/asf/kafka/tree/9b6744d3
Diff: http://git-wip-us.apache.org/repos/asf/kafka/diff/9b6744d3

Branch: refs/heads/0.8.2
Commit: 9b6744d3ae1587444b91e76cc5bdeb9d35f96760
Parents: 828b808
Author: Manikumar Reddy manikumar.re...@gmail.com
Authored: Mon Jan 12 22:01:43 2015 -0800
Committer: Jun Rao jun...@gmail.com
Committed: Mon Jan 12 22:01:43 2015 -0800

--
 build.gradle|   1 +
 .../apache/kafka/clients/consumer/Consumer.java |   5 +-
 .../kafka/clients/consumer/KafkaConsumer.java   |   3 +-
 .../kafka/clients/consumer/MockConsumer.java|   3 +-
 .../kafka/clients/producer/KafkaProducer.java   |  19 +-
 .../kafka/clients/producer/MockProducer.java|   7 +-
 .../apache/kafka/clients/producer/Producer.java |   4 +-
 .../clients/producer/internals/BufferPool.java  |  28 +--
 .../producer/internals/RecordAccumulator.java   |  25 +--
 .../clients/producer/internals/Sender.java  |  81 ++---
 .../java/org/apache/kafka/common/Metric.java|   9 +-
 .../org/apache/kafka/common/MetricName.java | 179 +++
 .../kafka/common/metrics/CompoundStat.java  |  14 +-
 .../kafka/common/metrics/JmxReporter.java   |  60 ---
 .../kafka/common/metrics/KafkaMetric.java   |  18 +-
 .../apache/kafka/common/metrics/Metrics.java|  58 ++
 .../org/apache/kafka/common/metrics/Sensor.java |  51 ++
 .../kafka/common/metrics/stats/Percentile.java  |  18 +-
 .../kafka/common/metrics/stats/Percentiles.java |   2 +-
 .../apache/kafka/common/network/Selector.java   |  93 ++
 .../kafka/clients/producer/BufferPoolTest.java  |  14 +-
 .../clients/producer/RecordAccumulatorTest.java |  16 +-
 .../kafka/clients/producer/SenderTest.java  |  11 +-
 .../kafka/common/metrics/JmxReporterTest.java   |  13 +-
 .../kafka/common/metrics/MetricsTest.java   |  87 +
 .../kafka/common/network/SelectorTest.java  |   3 +-
 .../org/apache/kafka/test/MetricsBench.java |  11 +-
 27 files changed, 533 insertions(+), 300 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/kafka/blob/9b6744d3/build.gradle
--
diff --git a/build.gradle b/build.gradle
index ba52288..c9ac433 100644
--- a/build.gradle
+++ b/build.gradle
@@ -370,6 +370,7 @@ project(':clients') {
 
   javadoc {
     include "**/org/apache/kafka/clients/producer/*"
+    include "**/org/apache/kafka/common/*"
     include "**/org/apache/kafka/common/errors/*"
     include "**/org/apache/kafka/common/serialization/*"
   }

http://git-wip-us.apache.org/repos/asf/kafka/blob/9b6744d3/clients/src/main/java/org/apache/kafka/clients/consumer/Consumer.java
--
diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/Consumer.java b/clients/src/main/java/org/apache/kafka/clients/consumer/Consumer.java
index 1bce501..c0c636b 100644
--- a/clients/src/main/java/org/apache/kafka/clients/consumer/Consumer.java
+++ b/clients/src/main/java/org/apache/kafka/clients/consumer/Consumer.java
@@ -18,6 +18,7 @@ import java.util.Map;
 
 import org.apache.kafka.common.Metric;
 import org.apache.kafka.common.TopicPartition;
+import org.apache.kafka.common.MetricName;
 
 /**
  * @see KafkaConsumer
@@ -111,11 +112,11 @@ public interface Consumer<K,V> extends Closeable {
      * @return The offsets for messages that were written to the server before the specified timestamp.
      */
     public Map<TopicPartition, Long> offsetsBeforeTime(long timestamp, Collection<TopicPartition> partitions);
-    
+
     /**
      * Return a map of metrics maintained by the consumer
     */
-    public Map<String, ? extends Metric> metrics();
+    public Map<MetricName, ? extends Metric> metrics();
 
     /**
      * Close this consumer

http://git-wip-us.apache.org/repos/asf/kafka/blob/9b6744d3/clients/src/main/java/org/apache/kafka/clients/consumer/KafkaConsumer.java
--
diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/KafkaConsumer.java b/clients/src/main/java/org/apache/kafka/clients/consumer/KafkaConsumer.java
index 7f8a41c..9fad073 100644
--- a/clients/src/main/java/org/apache/kafka/clients/consumer/KafkaConsumer.java
+++ b/clients/src/main/java/org/apache/kafka/clients/consumer/KafkaConsumer.java
@@ -16,6 +16,7 @@ import org.apache.kafka.common.Metric;
 import org.apache.kafka.common.TopicPartition;
 import org.apache.kafka.common.metrics.JmxReporter;
 import

[1/2] kafka git commit: KAFKA-1723; make the metrics name in new producer more standard; patched by Manikumar Reddy; reviewed by Jay Kreps and Jun Rao

2015-01-12 Thread junrao
Repository: kafka
Updated Branches:
  refs/heads/0.8.2 828b808f9 -> 9b6744d3a


http://git-wip-us.apache.org/repos/asf/kafka/blob/9b6744d3/clients/src/test/java/org/apache/kafka/common/metrics/MetricsTest.java
--
diff --git a/clients/src/test/java/org/apache/kafka/common/metrics/MetricsTest.java b/clients/src/test/java/org/apache/kafka/common/metrics/MetricsTest.java
index 19bea0f..998a57c 100644
--- a/clients/src/test/java/org/apache/kafka/common/metrics/MetricsTest.java
+++ b/clients/src/test/java/org/apache/kafka/common/metrics/MetricsTest.java
@@ -16,9 +16,12 @@ import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.fail;
 
 import java.util.Arrays;
+import java.util.HashMap;
+import java.util.Map;
 import java.util.concurrent.TimeUnit;
 
 import org.apache.kafka.common.Metric;
+import org.apache.kafka.common.MetricName;
 import org.apache.kafka.common.metrics.stats.Avg;
 import org.apache.kafka.common.metrics.stats.Count;
 import org.apache.kafka.common.metrics.stats.Max;
@@ -39,21 +42,39 @@ public class MetricsTest {
     Metrics metrics = new Metrics(new MetricConfig(), Arrays.asList((MetricsReporter) new JmxReporter()), time);
 
     @Test
+    public void testMetricName() {
+        MetricName n1 = new MetricName("name", "group", "description", "key1", "value1");
+        Map<String, String> tags = new HashMap<String, String>();
+        tags.put("key1", "value1");
+        MetricName n2 = new MetricName("name", "group", "description", tags);
+        assertEquals("metric names created in two different ways should be equal", n1, n2);
+
+        try {
+            new MetricName("name", "group", "description", "key1");
+            fail("Creating MetricName with an odd number of keyValue should fail");
+        } catch (IllegalArgumentException e) {
+            // this is expected
+        }
+    }
+
+    @Test
     public void testSimpleStats() throws Exception {
         ConstantMeasurable measurable = new ConstantMeasurable();
-        metrics.addMetric("direct.measurable", measurable);
+
+        metrics.addMetric(new MetricName("direct.measurable", "grp1", "The fraction of time an appender waits for space allocation."), measurable);
         Sensor s = metrics.sensor("test.sensor");
-        s.add("test.avg", new Avg());
-        s.add("test.max", new Max());
-        s.add("test.min", new Min());
-        s.add("test.rate", new Rate(TimeUnit.SECONDS));
-        s.add("test.occurences", new Rate(TimeUnit.SECONDS, new Count()));
-        s.add("test.count", new Count());
-        s.add(new Percentiles(100, -100, 100, BucketSizing.CONSTANT, new Percentile("test.median", 50.0), new Percentile("test.perc99_9",
-                                                                                                                         99.9)));
+        s.add(new MetricName("test.avg", "grp1"), new Avg());
+        s.add(new MetricName("test.max", "grp1"), new Max());
+        s.add(new MetricName("test.min", "grp1"), new Min());
+        s.add(new MetricName("test.rate", "grp1"), new Rate(TimeUnit.SECONDS));
+        s.add(new MetricName("test.occurences", "grp1"), new Rate(TimeUnit.SECONDS, new Count()));
+        s.add(new MetricName("test.count", "grp1"), new Count());
+        s.add(new Percentiles(100, -100, 100, BucketSizing.CONSTANT,
+                              new Percentile(new MetricName("test.median", "grp1"), 50.0),
+                              new Percentile(new MetricName("test.perc99_9", "grp1"), 99.9)));
 
         Sensor s2 = metrics.sensor("test.sensor2");
-        s2.add("s2.total", new Total());
+        s2.add(new MetricName("s2.total", "grp1"), new Total());
         s2.record(5.0);
 
         for (int i = 0; i < 10; i++)
@@ -62,27 +83,27 @@ public class MetricsTest {
         // pretend 2 seconds passed...
         time.sleep(2000);
 
-        assertEquals("s2 reflects the constant value", 5.0, metrics.metrics().get("s2.total").value(), EPS);
-        assertEquals("Avg(0...9) = 4.5", 4.5, metrics.metrics().get("test.avg").value(), EPS);
-        assertEquals("Max(0...9) = 9", 9.0, metrics.metrics().get("test.max").value(), EPS);
-        assertEquals("Min(0...9) = 0", 0.0, metrics.metrics().get("test.min").value(), EPS);
-        assertEquals("Rate(0...9) = 22.5", 22.5, metrics.metrics().get("test.rate").value(), EPS);
-        assertEquals("Occurences(0...9) = 5", 5.0, metrics.metrics().get("test.occurences").value(), EPS);
-        assertEquals("Count(0...9) = 10", 10.0, metrics.metrics().get("test.count").value(), EPS);
+        assertEquals("s2 reflects the constant value", 5.0, metrics.metrics().get(new MetricName("s2.total", "grp1")).value(), EPS);
+        assertEquals("Avg(0...9) = 4.5", 4.5, metrics.metrics().get(new MetricName("test.avg", "grp1")).value(), EPS);
+        assertEquals("Max(0...9) = 9", 9.0, metrics.metrics().get(new MetricName("test.max", "grp1")).value(), EPS);
+        assertEquals("Min(0...9) = 0", 0.0, metrics.metrics().get(new MetricName("test.min", "grp1")).value(), EPS);
+

kafka git commit: kafka-1797; (missed parametric in a few files) add the serializer/deserializer api to the new java client; patched by Jun Rao

2015-01-12 Thread junrao
Repository: kafka
Updated Branches:
  refs/heads/trunk 14779dddb -> 6f4dea9db


kafka-1797; (missed parametric in a few files) add the serializer/deserializer 
api to the new java client; patched by Jun Rao


Project: http://git-wip-us.apache.org/repos/asf/kafka/repo
Commit: http://git-wip-us.apache.org/repos/asf/kafka/commit/6f4dea9d
Tree: http://git-wip-us.apache.org/repos/asf/kafka/tree/6f4dea9d
Diff: http://git-wip-us.apache.org/repos/asf/kafka/diff/6f4dea9d

Branch: refs/heads/trunk
Commit: 6f4dea9dbce5cc9f69a917182981b41a56a98a85
Parents: 14779dd
Author: Jun Rao jun...@gmail.com
Authored: Mon Jan 12 18:45:45 2015 -0800
Committer: Jun Rao jun...@gmail.com
Committed: Mon Jan 12 21:29:40 2015 -0800

--
 .../org/apache/kafka/clients/producer/KafkaProducer.java| 2 +-
 .../org/apache/kafka/clients/producer/MockProducerTest.java | 9 +++--
 core/src/main/scala/kafka/tools/MirrorMaker.scala   | 2 +-
 3 files changed, 5 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/kafka/blob/6f4dea9d/clients/src/main/java/org/apache/kafka/clients/producer/KafkaProducer.java
--
diff --git a/clients/src/main/java/org/apache/kafka/clients/producer/KafkaProducer.java b/clients/src/main/java/org/apache/kafka/clients/producer/KafkaProducer.java
index a61c56c..d3abeb1 100644
--- a/clients/src/main/java/org/apache/kafka/clients/producer/KafkaProducer.java
+++ b/clients/src/main/java/org/apache/kafka/clients/producer/KafkaProducer.java
@@ -320,7 +320,7 @@ public class KafkaProducer<K,V> implements Producer<K,V> {
                     " to class " + producerConfig.getClass(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG).getName() +
                     " specified in value.serializer");
         }
-        ProducerRecord serializedRecord = new ProducerRecord<byte[], byte[]>(record.topic(), record.partition(), serializedKey, serializedValue);
+        ProducerRecord<byte[], byte[]> serializedRecord = new ProducerRecord<byte[], byte[]>(record.topic(), record.partition(), serializedKey, serializedValue);
         int partition = partitioner.partition(serializedRecord, metadata.fetch());
         int serializedSize = Records.LOG_OVERHEAD + Record.recordSize(serializedKey, serializedValue);
         ensureValidRecordSize(serializedSize);

http://git-wip-us.apache.org/repos/asf/kafka/blob/6f4dea9d/clients/src/test/java/org/apache/kafka/clients/producer/MockProducerTest.java
--
diff --git a/clients/src/test/java/org/apache/kafka/clients/producer/MockProducerTest.java b/clients/src/test/java/org/apache/kafka/clients/producer/MockProducerTest.java
index 1e2ca03..3676b05 100644
--- a/clients/src/test/java/org/apache/kafka/clients/producer/MockProducerTest.java
+++ b/clients/src/test/java/org/apache/kafka/clients/producer/MockProducerTest.java
@@ -25,9 +25,6 @@ import static org.junit.Assert.fail;
 import java.util.concurrent.ExecutionException;
 import java.util.concurrent.Future;
 
-import org.apache.kafka.clients.producer.MockProducer;
-import org.apache.kafka.clients.producer.ProducerRecord;
-import org.apache.kafka.clients.producer.RecordMetadata;
 import org.junit.Test;
 
 public class MockProducerTest {
@@ -37,7 +34,7 @@ public class MockProducerTest {
     @Test
     public void testAutoCompleteMock() throws Exception {
         MockProducer producer = new MockProducer(true);
-        ProducerRecord record = new ProducerRecord<byte[], byte[]>("topic", "key".getBytes(), "value".getBytes());
+        ProducerRecord<byte[], byte[]> record = new ProducerRecord<byte[], byte[]>("topic", "key".getBytes(), "value".getBytes());
         Future<RecordMetadata> metadata = producer.send(record);
         assertTrue("Send should be immediately complete", metadata.isDone());
         assertFalse("Send should be successful", isError(metadata));
@@ -51,8 +48,8 @@ public class MockProducerTest {
     @Test
     public void testManualCompletion() throws Exception {
         MockProducer producer = new MockProducer(false);
-        ProducerRecord record1 = new ProducerRecord<byte[], byte[]>("topic", "key1".getBytes(), "value1".getBytes());
-        ProducerRecord record2 = new ProducerRecord<byte[], byte[]>("topic", "key2".getBytes(), "value2".getBytes());
+        ProducerRecord<byte[], byte[]> record1 = new ProducerRecord<byte[], byte[]>("topic", "key1".getBytes(), "value1".getBytes());
+        ProducerRecord<byte[], byte[]> record2 = new ProducerRecord<byte[], byte[]>("topic", "key2".getBytes(), "value2".getBytes());
         Future<RecordMetadata> md1 = producer.send(record1);
         assertFalse("Send shouldn't have completed", md1.isDone());
         Future<RecordMetadata> md2 = producer.send(record2);
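
With the generics tightened up, MockProducer remains a convenient way to write deterministic tests of future and callback handling. A sketch of the manual-completion mode exercised by the test above, assuming the 0.8.2 MockProducer API:

    import java.util.concurrent.Future;

    import org.apache.kafka.clients.producer.MockProducer;
    import org.apache.kafka.clients.producer.ProducerRecord;
    import org.apache.kafka.clients.producer.RecordMetadata;

    public class MockProducerSketch {
        public static void main(String[] args) {
            MockProducer producer = new MockProducer(false); // sends stay incomplete until driven
            Future<RecordMetadata> md = producer.send(
                    new ProducerRecord<byte[], byte[]>("topic", "key".getBytes(), "value".getBytes()));
            System.out.println(md.isDone());  // false: nothing has been completed yet
            producer.completeNext();          // completes the oldest outstanding send
            System.out.println(md.isDone());  // true
        }
    }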


kafka git commit: change build version to 0.8.2.1

2015-02-18 Thread junrao
Repository: kafka
Updated Branches:
  refs/heads/0.8.2 45b9e1580 -> cd8f0b87b


change build version to 0.8.2.1


Project: http://git-wip-us.apache.org/repos/asf/kafka/repo
Commit: http://git-wip-us.apache.org/repos/asf/kafka/commit/cd8f0b87
Tree: http://git-wip-us.apache.org/repos/asf/kafka/tree/cd8f0b87
Diff: http://git-wip-us.apache.org/repos/asf/kafka/diff/cd8f0b87

Branch: refs/heads/0.8.2
Commit: cd8f0b87b9b02302b2fce494e04cc367647a2d2c
Parents: 45b9e15
Author: Jun Rao jun...@gmail.com
Authored: Wed Feb 18 17:55:57 2015 -0800
Committer: Jun Rao jun...@gmail.com
Committed: Wed Feb 18 17:55:57 2015 -0800

--
 gradle.properties | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/kafka/blob/cd8f0b87/gradle.properties
--
diff --git a/gradle.properties b/gradle.properties
index 773ed80..5f45930 100644
--- a/gradle.properties
+++ b/gradle.properties
@@ -14,7 +14,7 @@
 # limitations under the License.
 
 group=org.apache.kafka
-version=0.8.2.0
+version=0.8.2.1
 scalaVersion=2.10.4
 task=build
 org.gradle.jvmargs=-XX:MaxPermSize=512m -Xmx1024m



Git Push Summary

2015-02-18 Thread junrao
Repository: kafka
Updated Tags:  refs/tags/0.8.2.1 [created] c1b4c5853


svn commit: r8222 - in /release/kafka/0.8.2.1: ./ javadoc/ javadoc/org/ javadoc/org/apache/ javadoc/org/apache/kafka/ javadoc/org/apache/kafka/clients/ javadoc/org/apache/kafka/clients/producer/ javad

2015-03-09 Thread junrao
Author: junrao
Date: Mon Mar  9 18:29:48 2015
New Revision: 8222

Log:
Apache Kafka 0.8.2.1 release


[This commit notification would consist of 314 parts, 
which exceeds the limit of 50, so it was shortened to this summary.]


svn commit: r1665965 - /kafka/site/downloads.html

2015-03-11 Thread junrao
Author: junrao
Date: Wed Mar 11 18:54:40 2015
New Revision: 1665965

URL: http://svn.apache.org/r1665965
Log:
add 0.8.2.1 download links

Modified:
kafka/site/downloads.html

Modified: kafka/site/downloads.html
URL: http://svn.apache.org/viewvc/kafka/site/downloads.html?rev=1665965&r1=1665964&r2=1665965&view=diff
==
--- kafka/site/downloads.html (original)
+++ kafka/site/downloads.html Wed Mar 11 18:54:40 2015
@@ -1,10 +1,33 @@
 <!--#include virtual="includes/header.html" -->
 
 <h1>Releases</h1>
-0.8.2.0 is the latest release. The current stable version is 0.8.2.0. 
+0.8.2.1 is the latest release. The current stable version is 0.8.2.1. 
 
 <p>
 You can verify your download by following these <a href="http://www.apache.org/info/verification.html">procedures</a> and using these <a href="http://kafka.apache.org/KEYS">KEYS</a>.
+<h3>0.8.2.1</h3>
+<ul>
+  <li>
+    <a href="https://archive.apache.org/dist/kafka/0.8.2.1/RELEASE_NOTES.html">Release Notes</a>
+  </li>
+  <li>
+    Source download: <a href="https://www.apache.org/dyn/closer.cgi?path=/kafka/0.8.2.1/kafka-0.8.2.1-src.tgz">kafka-0.8.2.1-src.tgz</a> (<a href="https://dist.apache.org/repos/dist/release/kafka/0.8.2.1/kafka-0.8.2.1-src.tgz.asc">asc</a>, <a href="https://dist.apache.org/repos/dist/release/kafka/0.8.2.1/kafka-0.8.2.1-src.tgz.md5">md5</a>)
+  </li>
+  <li>
+    Binary downloads:
+    <ul>
+      <li>Scala 2.9.1 - <a href="https://www.apache.org/dyn/closer.cgi?path=/kafka/0.8.2.1/kafka_2.9.1-0.8.2.1.tgz">kafka_2.9.1-0.8.2.1.tgz</a> (<a href="https://dist.apache.org/repos/dist/release/kafka/0.8.2.1/kafka_2.9.1-0.8.2.1.tgz.asc">asc</a>, <a href="https://dist.apache.org/repos/dist/release/kafka/0.8.2.1/kafka_2.9.1-0.8.2.1.tgz.md5">md5</a>)
+      </li>
+      <li>Scala 2.9.2 - <a href="https://www.apache.org/dyn/closer.cgi?path=/kafka/0.8.2.1/kafka_2.9.2-0.8.2.1.tgz">kafka_2.9.2-0.8.2.1.tgz</a> (<a href="https://dist.apache.org/repos/dist/release/kafka/0.8.2.1/kafka_2.9.2-0.8.2.1.tgz.asc">asc</a>, <a href="https://dist.apache.org/repos/dist/release/kafka/0.8.2.1/kafka_2.9.2-0.8.2.1.tgz.md5">md5</a>)
+      </li>
+      <li>Scala 2.10 &nbsp;- <a href="https://www.apache.org/dyn/closer.cgi?path=/kafka/0.8.2.1/kafka_2.10-0.8.2.1.tgz">kafka_2.10-0.8.2.1.tgz</a> (<a href="https://dist.apache.org/repos/dist/release/kafka/0.8.2.1/kafka_2.10-0.8.2.1.tgz.asc">asc</a>, <a href="https://dist.apache.org/repos/dist/release/kafka/0.8.2.1/kafka_2.10-0.8.2.1.tgz.md5">md5</a>)
+      </li>
+      <li>Scala 2.11 &nbsp;- <a href="https://www.apache.org/dyn/closer.cgi?path=/kafka/0.8.2.1/kafka_2.11-0.8.2.1.tgz">kafka_2.11-0.8.2.1.tgz</a> (<a href="https://dist.apache.org/repos/dist/release/kafka/0.8.2.1/kafka_2.11-0.8.2.1.tgz.asc">asc</a>, <a href="https://dist.apache.org/repos/dist/release/kafka/0.8.2.1/kafka_2.11-0.8.2.1.tgz.md5">md5</a>)
+      </li>
+    </ul>
+    We build for multiple versions of Scala. This only matters if you are using Scala and you want a version built for the same Scala version you use. Otherwise any version should work (2.10 is recommended).
+  </li>
+</ul>
 <h3>0.8.2.0</h3>
 <ul>
   <li>




kafka git commit: kafka-527; Compression support does numerous byte copies; patched by Yasuhiro Matsuda; reviewed by Guozhang Wang and Jun Rao

2015-03-25 Thread junrao
Repository: kafka
Updated Branches:
  refs/heads/trunk eb2100876 -> a74688de4


kafka-527; Compression support does numerous byte copies; patched by Yasuhiro 
Matsuda; reviewed by Guozhang Wang and Jun Rao


Project: http://git-wip-us.apache.org/repos/asf/kafka/repo
Commit: http://git-wip-us.apache.org/repos/asf/kafka/commit/a74688de
Tree: http://git-wip-us.apache.org/repos/asf/kafka/tree/a74688de
Diff: http://git-wip-us.apache.org/repos/asf/kafka/diff/a74688de

Branch: refs/heads/trunk
Commit: a74688de46c23dd34ad964372fbed05c1f7f4b3e
Parents: eb21008
Author: Yasuhiro Matsuda yasuhiro.mats...@gmail.com
Authored: Wed Mar 25 13:08:38 2015 -0700
Committer: Jun Rao jun...@gmail.com
Committed: Wed Mar 25 13:08:38 2015 -0700

--
 .../kafka/message/ByteBufferMessageSet.scala|  65 +++---
 .../scala/kafka/message/MessageWriter.scala | 206 +++
 .../unit/kafka/message/MessageWriterTest.scala  | 130 
 3 files changed, 370 insertions(+), 31 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/kafka/blob/a74688de/core/src/main/scala/kafka/message/ByteBufferMessageSet.scala
--
diff --git a/core/src/main/scala/kafka/message/ByteBufferMessageSet.scala b/core/src/main/scala/kafka/message/ByteBufferMessageSet.scala
index 9c69471..2d6cfc0 100644
--- a/core/src/main/scala/kafka/message/ByteBufferMessageSet.scala
+++ b/core/src/main/scala/kafka/message/ByteBufferMessageSet.scala
@@ -5,7 +5,7 @@
  * The ASF licenses this file to You under the Apache License, Version 2.0
  * (the "License"); you may not use this file except in compliance with
  * the License.  You may obtain a copy of the License at
- * 
+ *
  *    http://www.apache.org/licenses/LICENSE-2.0
  *
  * Unless required by applicable law or agreed to in writing, software
@@ -20,12 +20,12 @@ package kafka.message
 import kafka.utils.Logging
 import java.nio.ByteBuffer
 import java.nio.channels._
-import java.io.{InputStream, ByteArrayOutputStream, DataOutputStream}
+import java.io.{InputStream, DataOutputStream}
 import java.util.concurrent.atomic.AtomicLong
 import kafka.utils.IteratorTemplate
 
 object ByteBufferMessageSet {
-  
+
   private def create(offsetCounter: AtomicLong, compressionCodec: CompressionCodec, messages: Message*): ByteBuffer = {
     if(messages.size == 0) {
       MessageSet.Empty.buffer
@@ -36,52 +36,55 @@ object ByteBufferMessageSet {
       buffer.rewind()
       buffer
     } else {
-      val byteArrayStream = new ByteArrayOutputStream(MessageSet.messageSetSize(messages))
-      val output = new DataOutputStream(CompressionFactory(compressionCodec, byteArrayStream))
       var offset = -1L
-      try {
-        for(message <- messages) {
-          offset = offsetCounter.getAndIncrement
-          output.writeLong(offset)
-          output.writeInt(message.size)
-          output.write(message.buffer.array, message.buffer.arrayOffset, message.buffer.limit)
+      val messageWriter = new MessageWriter(math.min(math.max(MessageSet.messageSetSize(messages) / 2, 1024), 1 << 16))
+      messageWriter.write(codec = compressionCodec) { outputStream =>
+        val output = new DataOutputStream(CompressionFactory(compressionCodec, outputStream))
+        try {
+          for (message <- messages) {
+            offset = offsetCounter.getAndIncrement
+            output.writeLong(offset)
+            output.writeInt(message.size)
+            output.write(message.buffer.array, message.buffer.arrayOffset, message.buffer.limit)
+          }
+        } finally {
+          output.close()
         }
-      } finally {
-        output.close()
       }
-      val bytes = byteArrayStream.toByteArray
-      val message = new Message(bytes, compressionCodec)
-      val buffer = ByteBuffer.allocate(message.size + MessageSet.LogOverhead)
-      writeMessage(buffer, message, offset)
+      val buffer = ByteBuffer.allocate(messageWriter.size + MessageSet.LogOverhead)
+      writeMessage(buffer, messageWriter, offset)
       buffer.rewind()
       buffer
     }
   }
-  
+
   def decompress(message: Message): ByteBufferMessageSet = {
-    val outputStream: ByteArrayOutputStream = new ByteArrayOutputStream
+    val outputStream = new BufferingOutputStream(math.min(math.max(message.size, 1024), 1 << 16))
     val inputStream: InputStream = new ByteBufferBackedInputStream(message.payload)
-    val intermediateBuffer = new Array[Byte](1024)
     val compressed = CompressionFactory(message.compressionCodec, inputStream)
     try {
-      Stream.continually(compressed.read(intermediateBuffer)).takeWhile(_ > 0).foreach { dataRead =>
-        outputStream.write(intermediateBuffer, 0, dataRead)
-      }
+      outputStream.write(compressed)
    } finally {
      compressed.close()
    }
    val outputBuffer = 
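
The copy chain being removed in decompress() is the familiar drain-to-ByteArrayOutputStream pattern: each chunk is read into a scratch array, copied again into the growing stream, and copied a third time by toByteArray(). A minimal Java rendering of that pre-patch pattern, for comparison (a hypothetical helper, not code from the patch):

    import java.io.ByteArrayOutputStream;
    import java.io.IOException;
    import java.io.InputStream;

    public final class StreamDrain {
        // Every iteration copies bytes twice (into scratch, then into out),
        // and toByteArray() performs one more full-size copy at the end.
        static byte[] drain(InputStream in) throws IOException {
            ByteArrayOutputStream out = new ByteArrayOutputStream();
            byte[] scratch = new byte[1024];
            int n;
            while ((n = in.read(scratch)) != -1) {
                out.write(scratch, 0, n);
            }
            return out.toByteArray();
        }
    }

As the diff shows, the patched code instead hands the compressed InputStream straight to the buffering output stream (outputStream.write(compressed)), avoiding the scratch array and the final toByteArray() copy.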

kafka git commit: kafka-2044; Support requests and responses from o.a.k.common in KafkaApis; patched by Gwen Shapira; reviewed by Jun Rao

2015-03-28 Thread junrao
Repository: kafka
Updated Branches:
  refs/heads/trunk c5df2a8e3 -> d8fe98efe


kafka-2044; Support requests and responses from o.a.k.common in KafkaApis; 
patched by Gwen Shapira; reviewed by Jun Rao


Project: http://git-wip-us.apache.org/repos/asf/kafka/repo
Commit: http://git-wip-us.apache.org/repos/asf/kafka/commit/d8fe98ef
Tree: http://git-wip-us.apache.org/repos/asf/kafka/tree/d8fe98ef
Diff: http://git-wip-us.apache.org/repos/asf/kafka/diff/d8fe98ef

Branch: refs/heads/trunk
Commit: d8fe98efee5a44ae12c1e3484fa20f89b0f30054
Parents: c5df2a8
Author: Gwen Shapira csh...@gmail.com
Authored: Sat Mar 28 08:39:48 2015 -0700
Committer: Jun Rao jun...@gmail.com
Committed: Sat Mar 28 08:39:48 2015 -0700

--
 checkstyle/import-control.xml   |  4 +-
 .../main/java/org/apache/kafka/common/Node.java |  4 ++
 .../kafka/common/requests/AbstractRequest.java  | 62 
 .../requests/ConsumerMetadataRequest.java   |  9 ++-
 .../kafka/common/requests/FetchRequest.java | 17 +-
 .../kafka/common/requests/FetchResponse.java|  3 +
 .../kafka/common/requests/HeartbeatRequest.java |  8 ++-
 .../kafka/common/requests/JoinGroupRequest.java |  8 ++-
 .../common/requests/ListOffsetRequest.java  | 15 -
 .../kafka/common/requests/MetadataRequest.java  | 14 -
 .../kafka/common/requests/MetadataResponse.java | 19 ++
 .../common/requests/OffsetCommitRequest.java| 13 +++-
 .../common/requests/OffsetFetchRequest.java | 17 +-
 .../common/requests/OffsetFetchResponse.java|  3 +
 .../kafka/common/requests/ProduceRequest.java   | 19 +-
 .../kafka/common/requests/ProduceResponse.java  |  2 +
 .../common/requests/RequestResponseTest.java| 34 +++
 .../kafka/api/HeartbeatRequestAndHeader.scala   | 45 --
 .../kafka/api/HeartbeatResponseAndHeader.scala  | 28 -
 .../kafka/api/JoinGroupRequestAndHeader.scala   | 45 --
 .../kafka/api/JoinGroupResponseAndHeader.scala  | 28 -
 core/src/main/scala/kafka/api/RequestKeys.scala |  4 +-
 .../kafka/network/BoundedByteBufferSend.scala   |  8 +++
 .../scala/kafka/network/RequestChannel.scala| 19 +-
 .../src/main/scala/kafka/server/KafkaApis.scala | 48 +--
 .../api/RequestResponseSerializationTest.scala  | 29 +
 26 files changed, 287 insertions(+), 218 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/kafka/blob/d8fe98ef/checkstyle/import-control.xml
--
diff --git a/checkstyle/import-control.xml b/checkstyle/import-control.xml
index cca4b38..f2e6cec 100644
--- a/checkstyle/import-control.xml
+++ b/checkstyle/import-control.xml
@@ -66,6 +66,8 @@
    <subpackage name="requests">
        <allow pkg="org.apache.kafka.common.protocol" />
        <allow pkg="org.apache.kafka.common.network" />
+       <!-- for testing -->
+       <allow pkg="org.apache.kafka.common.errors" />
    </subpackage>

    <subpackage name="serialization">
@@ -97,4 +99,4 @@
        <allow pkg="org.apache.kafka" />
    </subpackage>

-</import-control>
\ No newline at end of file
+</import-control>

http://git-wip-us.apache.org/repos/asf/kafka/blob/d8fe98ef/clients/src/main/java/org/apache/kafka/common/Node.java
--
diff --git a/clients/src/main/java/org/apache/kafka/common/Node.java 
b/clients/src/main/java/org/apache/kafka/common/Node.java
index 88c3b24..f4e4186 100644
--- a/clients/src/main/java/org/apache/kafka/common/Node.java
+++ b/clients/src/main/java/org/apache/kafka/common/Node.java
@@ -28,6 +28,10 @@ public class Node {
 this.port = port;
 }
 
+public static Node noNode() {
+return new Node(-1, "", -1);
+}
+
 /**
  * The node id of this node
  */

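noNode() is a null-object sentinel (id -1, empty host, port -1) that callers 
can return instead of null when, say, a partition has no leader. A hedged 
sketch of a call site (the helper is illustrative, not part of the API):

    import org.apache.kafka.common.Node

    // hypothetical helper: substitute the sentinel for a missing leader
    def leaderOrNoNode(leader: Node): Node =
      Option(leader).getOrElse(Node.noNode())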
http://git-wip-us.apache.org/repos/asf/kafka/blob/d8fe98ef/clients/src/main/java/org/apache/kafka/common/requests/AbstractRequest.java
--
diff --git 
a/clients/src/main/java/org/apache/kafka/common/requests/AbstractRequest.java 
b/clients/src/main/java/org/apache/kafka/common/requests/AbstractRequest.java
new file mode 100644
index 000..5e5308e
--- /dev/null
+++ 
b/clients/src/main/java/org/apache/kafka/common/requests/AbstractRequest.java
@@ -0,0 +1,62 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the License); you may not use this file except in compliance with
+ * the License.  You may obtain a 

kafka git commit: kafka-1971; starting a broker with a conflicting id will delete the previous broker registration; patched by Jun Rao; reviewed by Neha Narkhede

2015-02-23 Thread junrao
Repository: kafka
Updated Branches:
  refs/heads/trunk 3f1e08822 -> 41189ea56


kafka-1971; starting a broker with a conflicting id will delete the previous 
broker registration; patched by Jun Rao; reviewed by Neha Narkhede


Project: http://git-wip-us.apache.org/repos/asf/kafka/repo
Commit: http://git-wip-us.apache.org/repos/asf/kafka/commit/41189ea5
Tree: http://git-wip-us.apache.org/repos/asf/kafka/tree/41189ea5
Diff: http://git-wip-us.apache.org/repos/asf/kafka/diff/41189ea5

Branch: refs/heads/trunk
Commit: 41189ea5601837bdb697ade31f55e244abbe6d1c
Parents: 3f1e088
Author: Jun Rao jun...@gmail.com
Authored: Mon Feb 23 11:51:32 2015 -0800
Committer: Jun Rao jun...@gmail.com
Committed: Mon Feb 23 11:51:32 2015 -0800

--
 .../scala/kafka/server/KafkaHealthcheck.scala   |  7 +---
 .../main/scala/kafka/server/KafkaServer.scala   |  2 -
 core/src/main/scala/kafka/utils/ZkUtils.scala   |  6 ---
 .../unit/kafka/server/ServerStartupTest.scala   | 42 ++--
 4 files changed, 31 insertions(+), 26 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/kafka/blob/41189ea5/core/src/main/scala/kafka/server/KafkaHealthcheck.scala
--
diff --git a/core/src/main/scala/kafka/server/KafkaHealthcheck.scala 
b/core/src/main/scala/kafka/server/KafkaHealthcheck.scala
index 4acdd70..7907987 100644
--- a/core/src/main/scala/kafka/server/KafkaHealthcheck.scala
+++ b/core/src/main/scala/kafka/server/KafkaHealthcheck.scala
@@ -39,17 +39,12 @@ class KafkaHealthcheck(private val brokerId: Int,
 
   val brokerIdPath = ZkUtils.BrokerIdsPath + "/" + brokerId
   val sessionExpireListener = new SessionExpireListener
-  
+
   def startup() {
 zkClient.subscribeStateChanges(sessionExpireListener)
 register()
   }
 
-  def shutdown() {
-zkClient.unsubscribeStateChanges(sessionExpireListener)
-ZkUtils.deregisterBrokerInZk(zkClient, brokerId)
-  }
-
   /**
* Register this broker as alive in zookeeper
*/

http://git-wip-us.apache.org/repos/asf/kafka/blob/41189ea5/core/src/main/scala/kafka/server/KafkaServer.scala
--
diff --git a/core/src/main/scala/kafka/server/KafkaServer.scala 
b/core/src/main/scala/kafka/server/KafkaServer.scala
index 7e5ddcb..426e522 100644
--- a/core/src/main/scala/kafka/server/KafkaServer.scala
+++ b/core/src/main/scala/kafka/server/KafkaServer.scala
@@ -310,8 +310,6 @@ class KafkaServer(val config: KafkaConfig, time: Time = 
SystemTime) extends Logg
   if (canShutdown) {
 Utils.swallow(controlledShutdown())
 brokerState.newState(BrokerShuttingDown)
-if(kafkaHealthcheck != null)
-  Utils.swallow(kafkaHealthcheck.shutdown())
 if(socketServer != null)
   Utils.swallow(socketServer.shutdown())
 if(requestHandlerPool != null)

http://git-wip-us.apache.org/repos/asf/kafka/blob/41189ea5/core/src/main/scala/kafka/utils/ZkUtils.scala
--
diff --git a/core/src/main/scala/kafka/utils/ZkUtils.scala 
b/core/src/main/scala/kafka/utils/ZkUtils.scala
index c78a1b6..8a2fb2d 100644
--- a/core/src/main/scala/kafka/utils/ZkUtils.scala
+++ b/core/src/main/scala/kafka/utils/ZkUtils.scala
@@ -189,12 +189,6 @@ object ZkUtils extends Logging {
 info("Registered broker %d at path %s with address %s:%d.".format(id, 
brokerIdPath, host, port))
   }
 
-  def deregisterBrokerInZk(zkClient: ZkClient, id: Int) {
-val brokerIdPath = ZkUtils.BrokerIdsPath + "/" + id
-deletePath(zkClient, brokerIdPath)
-info("Deregistered broker %d at path %s.".format(id, brokerIdPath))
-  }
-
   def getConsumerPartitionOwnerPath(group: String, topic: String, partition: 
Int): String = {
 val topicDirs = new ZKGroupTopicDirs(group, topic)
 topicDirs.consumerOwnerDir + "/" + partition

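The fix removes deregisterBrokerInZk entirely: broker registration is an 
ephemeral znode, so a broker that starts with an id already in use fails 
registration, and the old shutdown path then deleted the znode that still 
belonged to the live broker. A minimal sketch of the safe behavior, assuming 
the ZkClient API (the helper name is ours):

    import org.I0Itec.zkclient.ZkClient
    import org.I0Itec.zkclient.exception.ZkNodeExistsException

    // register an ephemeral node; on an id conflict, fail fast and leave
    // the existing owner's registration untouched
    def registerBroker(zkClient: ZkClient, id: Int, data: String): Unit = {
      val path = "/brokers/ids/" + id
      try zkClient.createEphemeral(path, data)
      catch {
        case _: ZkNodeExistsException =>
          throw new RuntimeException("broker id " + id + " is already registered")
      }
    }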
http://git-wip-us.apache.org/repos/asf/kafka/blob/41189ea5/core/src/test/scala/unit/kafka/server/ServerStartupTest.scala
--
diff --git a/core/src/test/scala/unit/kafka/server/ServerStartupTest.scala 
b/core/src/test/scala/unit/kafka/server/ServerStartupTest.scala
index 764655a..93af7df 100644
--- a/core/src/test/scala/unit/kafka/server/ServerStartupTest.scala
+++ b/core/src/test/scala/unit/kafka/server/ServerStartupTest.scala
@@ -26,26 +26,44 @@ import kafka.zk.ZooKeeperTestHarness
 import junit.framework.Assert._
 
 class ServerStartupTest extends JUnit3Suite with ZooKeeperTestHarness {
-  var server : KafkaServer = null
-  val brokerId = 0
-  val zookeeperChroot = "/kafka-chroot-for-unittest"
 
-  override def setUp() {
-super.setUp()
+  def testBrokerCreatesZKChroot {
+val brokerId = 0
+val zookeeperChroot = "/kafka-chroot-for-unittest"
 val props = 

kafka git commit: kafka-1971; starting a broker with a conflicting id will delete the previous broker registration; patched by Jun Rao; reviewed by Neha Narkhede

2015-02-23 Thread junrao
Repository: kafka
Updated Branches:
  refs/heads/0.8.2 bafecc936 -> 5f3eb1caf


kafka-1971; starting a broker with a conflicting id will delete the previous 
broker registration; patched by Jun Rao; reviewed by Neha Narkhede


Project: http://git-wip-us.apache.org/repos/asf/kafka/repo
Commit: http://git-wip-us.apache.org/repos/asf/kafka/commit/5f3eb1ca
Tree: http://git-wip-us.apache.org/repos/asf/kafka/tree/5f3eb1ca
Diff: http://git-wip-us.apache.org/repos/asf/kafka/diff/5f3eb1ca

Branch: refs/heads/0.8.2
Commit: 5f3eb1cafaecee746641fa43945cc0762635a0bd
Parents: bafecc9
Author: Jun Rao jun...@gmail.com
Authored: Mon Feb 23 12:13:21 2015 -0800
Committer: Jun Rao jun...@gmail.com
Committed: Mon Feb 23 12:13:21 2015 -0800

--
 .../scala/kafka/server/KafkaHealthcheck.scala   |  7 +---
 .../main/scala/kafka/server/KafkaServer.scala   |  2 -
 core/src/main/scala/kafka/utils/ZkUtils.scala   |  6 ---
 .../unit/kafka/server/ServerStartupTest.scala   | 43 +---
 4 files changed, 30 insertions(+), 28 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/kafka/blob/5f3eb1ca/core/src/main/scala/kafka/server/KafkaHealthcheck.scala
--
diff --git a/core/src/main/scala/kafka/server/KafkaHealthcheck.scala 
b/core/src/main/scala/kafka/server/KafkaHealthcheck.scala
index 4acdd70..7907987 100644
--- a/core/src/main/scala/kafka/server/KafkaHealthcheck.scala
+++ b/core/src/main/scala/kafka/server/KafkaHealthcheck.scala
@@ -39,17 +39,12 @@ class KafkaHealthcheck(private val brokerId: Int,
 
   val brokerIdPath = ZkUtils.BrokerIdsPath + "/" + brokerId
   val sessionExpireListener = new SessionExpireListener
-  
+
   def startup() {
 zkClient.subscribeStateChanges(sessionExpireListener)
 register()
   }
 
-  def shutdown() {
-zkClient.unsubscribeStateChanges(sessionExpireListener)
-ZkUtils.deregisterBrokerInZk(zkClient, brokerId)
-  }
-
   /**
* Register this broker as alive in zookeeper
*/

http://git-wip-us.apache.org/repos/asf/kafka/blob/5f3eb1ca/core/src/main/scala/kafka/server/KafkaServer.scala
--
diff --git a/core/src/main/scala/kafka/server/KafkaServer.scala 
b/core/src/main/scala/kafka/server/KafkaServer.scala
index 1691ad7..5cd4c84 100644
--- a/core/src/main/scala/kafka/server/KafkaServer.scala
+++ b/core/src/main/scala/kafka/server/KafkaServer.scala
@@ -268,8 +268,6 @@ class KafkaServer(val config: KafkaConfig, time: Time = 
SystemTime) extends Logg
   if (canShutdown) {
 Utils.swallow(controlledShutdown())
 brokerState.newState(BrokerShuttingDown)
-if(kafkaHealthcheck != null)
-  Utils.swallow(kafkaHealthcheck.shutdown())
 if(socketServer != null)
   Utils.swallow(socketServer.shutdown())
 if(requestHandlerPool != null)

http://git-wip-us.apache.org/repos/asf/kafka/blob/5f3eb1ca/core/src/main/scala/kafka/utils/ZkUtils.scala
--
diff --git a/core/src/main/scala/kafka/utils/ZkUtils.scala 
b/core/src/main/scala/kafka/utils/ZkUtils.scala
index 56e3e88..a6f4d46 100644
--- a/core/src/main/scala/kafka/utils/ZkUtils.scala
+++ b/core/src/main/scala/kafka/utils/ZkUtils.scala
@@ -179,12 +179,6 @@ object ZkUtils extends Logging {
 info("Registered broker %d at path %s with address %s:%d.".format(id, 
brokerIdPath, host, port))
   }
 
-  def deregisterBrokerInZk(zkClient: ZkClient, id: Int) {
-val brokerIdPath = ZkUtils.BrokerIdsPath + "/" + id
-deletePath(zkClient, brokerIdPath)
-info("Deregistered broker %d at path %s.".format(id, brokerIdPath))
-  }
-
   def getConsumerPartitionOwnerPath(group: String, topic: String, partition: 
Int): String = {
 val topicDirs = new ZKGroupTopicDirs(group, topic)
 topicDirs.consumerOwnerDir + "/" + partition

http://git-wip-us.apache.org/repos/asf/kafka/blob/5f3eb1ca/core/src/test/scala/unit/kafka/server/ServerStartupTest.scala
--
diff --git a/core/src/test/scala/unit/kafka/server/ServerStartupTest.scala 
b/core/src/test/scala/unit/kafka/server/ServerStartupTest.scala
index a0ed485..93af7df 100644
--- a/core/src/test/scala/unit/kafka/server/ServerStartupTest.scala
+++ b/core/src/test/scala/unit/kafka/server/ServerStartupTest.scala
@@ -18,7 +18,6 @@
 package kafka.server
 
 import org.scalatest.junit.JUnit3Suite
-import kafka.zk
 import kafka.utils.ZkUtils
 import kafka.utils.Utils
 import kafka.utils.TestUtils
@@ -27,28 +26,44 @@ import kafka.zk.ZooKeeperTestHarness
 import junit.framework.Assert._
 
 class ServerStartupTest extends JUnit3Suite with ZooKeeperTestHarness {
-  var server : KafkaServer = null
-  val brokerId = 0
-  val zookeeperChroot = "/kafka-chroot-for-unittest"
 
-  

kafka git commit: kafka-1984; java producer may miss an available partition; patched by Jun Rao; reviewed by Ewen Cheslack-Postava, Jay Kreps, and Guozhang Wang

2015-02-24 Thread junrao
Repository: kafka
Updated Branches:
  refs/heads/trunk 7c82afddc -> 10311c138


kafka-1984; java producer may miss an available partition; patched by Jun Rao; 
reviewed by Ewen Cheslack-Postava, Jay Kreps, and Guozhang Wang


Project: http://git-wip-us.apache.org/repos/asf/kafka/repo
Commit: http://git-wip-us.apache.org/repos/asf/kafka/commit/10311c13
Tree: http://git-wip-us.apache.org/repos/asf/kafka/tree/10311c13
Diff: http://git-wip-us.apache.org/repos/asf/kafka/diff/10311c13

Branch: refs/heads/trunk
Commit: 10311c138923dcded5d8f57f587c7f7ef1119dac
Parents: 7c82afd
Author: Jun Rao jun...@gmail.com
Authored: Tue Feb 24 14:07:27 2015 -0800
Committer: Jun Rao jun...@gmail.com
Committed: Tue Feb 24 14:07:27 2015 -0800

--
 .../clients/producer/internals/Partitioner.java | 15 +-
 .../java/org/apache/kafka/common/Cluster.java   | 24 ++--
 .../kafka/clients/producer/PartitionerTest.java | 29 ++--
 3 files changed, 45 insertions(+), 23 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/kafka/blob/10311c13/clients/src/main/java/org/apache/kafka/clients/producer/internals/Partitioner.java
--
diff --git 
a/clients/src/main/java/org/apache/kafka/clients/producer/internals/Partitioner.java
 
b/clients/src/main/java/org/apache/kafka/clients/producer/internals/Partitioner.java
index 8112e6d..dfb936d 100644
--- 
a/clients/src/main/java/org/apache/kafka/clients/producer/internals/Partitioner.java
+++ 
b/clients/src/main/java/org/apache/kafka/clients/producer/internals/Partitioner.java
@@ -56,14 +56,15 @@ public class Partitioner {
+ "].");
 return partition;
 } else if (key == null) {
-// choose the next available node in a round-robin fashion
-for (int i = 0; i < numPartitions; i++) {
-int part = Utils.abs(counter.getAndIncrement()) % 
numPartitions;
-if (partitions.get(part).leader() != null)
-return part;
+int nextValue = counter.getAndIncrement();
+List<PartitionInfo> availablePartitions = 
cluster.availablePartitionsForTopic(topic);
+if (availablePartitions.size() > 0) {
+int part = Utils.abs(nextValue) % availablePartitions.size();
+return availablePartitions.get(part).partition();
+} else {
+// no partitions are available, give a non-available partition
+return Utils.abs(nextValue) % numPartitions;
 }
-// no partitions are available, give a non-available partition
-return Utils.abs(counter.getAndIncrement()) % numPartitions;
 } else {
 // hash the key to choose a partition
 return Utils.abs(Utils.murmur2(key)) % numPartitions;

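The fix draws one counter value and maps it onto the list of partitions that 
currently have a leader, instead of probing partitions one counter tick at a 
time. A standalone sketch of that selection logic (not the client's actual 
class):

    import java.util.concurrent.atomic.AtomicInteger

    class RoundRobinChooser(counter: AtomicInteger = new AtomicInteger(0)) {
      // available holds the indexes of partitions with a live leader
      def choose(available: Seq[Int], numPartitions: Int): Int = {
        val next = counter.getAndIncrement() & 0x7fffffff // non-negative, like Utils.abs
        if (available.nonEmpty) available(next % available.size)
        else next % numPartitions // nothing has a leader; hand back some partition anyway
      }
    }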
http://git-wip-us.apache.org/repos/asf/kafka/blob/10311c13/clients/src/main/java/org/apache/kafka/common/Cluster.java
--
diff --git a/clients/src/main/java/org/apache/kafka/common/Cluster.java 
b/clients/src/main/java/org/apache/kafka/common/Cluster.java
index 8fcd291..60594a7 100644
--- a/clients/src/main/java/org/apache/kafka/common/Cluster.java
+++ b/clients/src/main/java/org/apache/kafka/common/Cluster.java
@@ -25,6 +25,7 @@ public final class Cluster {
 private final List<Node> nodes;
 private final Map<TopicPartition, PartitionInfo> 
partitionsByTopicPartition;
 private final Map<String, List<PartitionInfo>> partitionsByTopic;
+private final Map<String, List<PartitionInfo>> availablePartitionsByTopic;
 private final Map<Integer, List<PartitionInfo>> partitionsByNode;
 private final Map<Integer, Node> nodesById;
 
@@ -68,8 +69,18 @@ public final class Cluster {
 }
 }
 this.partitionsByTopic = new HashMap<String, 
List<PartitionInfo>>(partsForTopic.size());
-for (Map.Entry<String, List<PartitionInfo>> entry : 
partsForTopic.entrySet())
-this.partitionsByTopic.put(entry.getKey(), 
Collections.unmodifiableList(entry.getValue()));
+this.availablePartitionsByTopic = new HashMap<String, 
List<PartitionInfo>>(partsForTopic.size());
+for (Map.Entry<String, List<PartitionInfo>> entry : 
partsForTopic.entrySet()) {
+String topic = entry.getKey();
+List<PartitionInfo> partitionList = entry.getValue();
+this.partitionsByTopic.put(topic, 
Collections.unmodifiableList(partitionList));
+List<PartitionInfo> availablePartitions = new 
ArrayList<PartitionInfo>();
+for (PartitionInfo part : partitionList) {
+if (part.leader() != null)
+availablePartitions.add(part);
+}
+this.availablePartitionsByTopic.put(topic, 

kafka git commit: kafka-1881; transient unit test failure in testDeleteTopicWithCleaner due to OOME; patched by Ewen Cheslack-Postava; reviewed by Jun Rao

2015-02-26 Thread junrao
Repository: kafka
Updated Branches:
  refs/heads/trunk b8904e961 -> ef252dea7


kafka-1881; transient unit test failure in testDeleteTopicWithCleaner due to 
OOME; patched by Ewen Cheslack-Postava; reviewed by Jun Rao


Project: http://git-wip-us.apache.org/repos/asf/kafka/repo
Commit: http://git-wip-us.apache.org/repos/asf/kafka/commit/ef252dea
Tree: http://git-wip-us.apache.org/repos/asf/kafka/tree/ef252dea
Diff: http://git-wip-us.apache.org/repos/asf/kafka/diff/ef252dea

Branch: refs/heads/trunk
Commit: ef252dea704192a8055367d22e45f673f5b06e94
Parents: b8904e9
Author: Ewen Cheslack-Postava m...@ewencp.org
Authored: Thu Feb 26 15:03:12 2015 -0800
Committer: Jun Rao jun...@gmail.com
Committed: Thu Feb 26 15:03:12 2015 -0800

--
 core/src/test/scala/unit/kafka/admin/DeleteTopicTest.scala | 2 ++
 1 file changed, 2 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/kafka/blob/ef252dea/core/src/test/scala/unit/kafka/admin/DeleteTopicTest.scala
--
diff --git a/core/src/test/scala/unit/kafka/admin/DeleteTopicTest.scala 
b/core/src/test/scala/unit/kafka/admin/DeleteTopicTest.scala
index 0cbd726..c8f336a 100644
--- a/core/src/test/scala/unit/kafka/admin/DeleteTopicTest.scala
+++ b/core/src/test/scala/unit/kafka/admin/DeleteTopicTest.scala
@@ -230,6 +230,8 @@ class DeleteTopicTest extends JUnit3Suite with 
ZooKeeperTestHarness {
 brokerConfigs(0).setProperty("log.cleanup.policy","compact")
 brokerConfigs(0).setProperty("log.segment.bytes","100")
 brokerConfigs(0).setProperty("log.segment.delete.delay.ms","1000")
+brokerConfigs(0).setProperty("log.cleaner.dedupe.buffer.size","1048577")
+
 val servers = createTestTopicAndCluster(topic,brokerConfigs)
 
 // for simplicity, we are validating cleaner offsets on a single broker



Git Push Summary

2015-02-26 Thread junrao
Repository: kafka
Updated Tags:  refs/tags/0.8.2.1 [created] bd1bfb63e


kafka git commit: kafka-1400; transient unit test failure in SocketServerTest; patched by Jun Rao; reviewed by Ewen Cheslack-Postava and Jiangjie Qin

2015-02-26 Thread junrao
Repository: kafka
Updated Branches:
  refs/heads/trunk ef252dea7 -> e39373d16


kafka-1400; transient unit test failure in SocketServerTest; patched by Jun 
Rao; reviewed by Ewen Cheslack-Postava and Jiangjie Qin


Project: http://git-wip-us.apache.org/repos/asf/kafka/repo
Commit: http://git-wip-us.apache.org/repos/asf/kafka/commit/e39373d1
Tree: http://git-wip-us.apache.org/repos/asf/kafka/tree/e39373d1
Diff: http://git-wip-us.apache.org/repos/asf/kafka/diff/e39373d1

Branch: refs/heads/trunk
Commit: e39373d16bc208128d5d6cf0e0672f27f35e3b20
Parents: ef252de
Author: Jun Rao jun...@gmail.com
Authored: Thu Feb 26 18:02:19 2015 -0800
Committer: Jun Rao jun...@gmail.com
Committed: Thu Feb 26 18:02:19 2015 -0800

--
 core/src/test/scala/unit/kafka/network/SocketServerTest.scala | 5 -
 1 file changed, 4 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/kafka/blob/e39373d1/core/src/test/scala/unit/kafka/network/SocketServerTest.scala
--
diff --git a/core/src/test/scala/unit/kafka/network/SocketServerTest.scala 
b/core/src/test/scala/unit/kafka/network/SocketServerTest.scala
index 78b431f..0af23ab 100644
--- a/core/src/test/scala/unit/kafka/network/SocketServerTest.scala
+++ b/core/src/test/scala/unit/kafka/network/SocketServerTest.scala
@@ -139,8 +139,11 @@ class SocketServerTest extends JUnitSuite {
 processRequest(server.requestChannel)
 // then shutdown the server
 server.shutdown()
+
+val largeChunkOfBytes = new Array[Byte](100)
 // doing a subsequent send should throw an exception as the connection 
should be closed.
-sendRequest(socket, 0, bytes)
+// send a large chunk of bytes to trigger a socket flush
+sendRequest(socket, 0, largeChunkOfBytes)
   }
 
   @Test



kafka git commit: kafka-2013; benchmark test for the purgatory; patched by Yasuhiro Matsuda; reviewed by Jun Rao

2015-04-01 Thread junrao
Repository: kafka
Updated Branches:
  refs/heads/trunk 66c6f9b1c -> 619d78eb5


kafka-2013; benchmark test for the purgatory; patched by Yasuhiro Matsuda; 
reviewed by Jun Rao


Project: http://git-wip-us.apache.org/repos/asf/kafka/repo
Commit: http://git-wip-us.apache.org/repos/asf/kafka/commit/619d78eb
Tree: http://git-wip-us.apache.org/repos/asf/kafka/tree/619d78eb
Diff: http://git-wip-us.apache.org/repos/asf/kafka/diff/619d78eb

Branch: refs/heads/trunk
Commit: 619d78eb521185f365a002e5f1987b75d3f35192
Parents: 66c6f9b
Author: Yasuhiro Matsuda yasuhiro.mats...@gmail.com
Authored: Wed Apr 1 16:14:48 2015 -0700
Committer: Jun Rao jun...@gmail.com
Committed: Wed Apr 1 16:14:48 2015 -0700

--
 .../other/kafka/TestPurgatoryPerformance.scala  | 275 +++
 1 file changed, 275 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/kafka/blob/619d78eb/core/src/test/scala/other/kafka/TestPurgatoryPerformance.scala
--
diff --git a/core/src/test/scala/other/kafka/TestPurgatoryPerformance.scala 
b/core/src/test/scala/other/kafka/TestPurgatoryPerformance.scala
new file mode 100644
index 000..962253a
--- /dev/null
+++ b/core/src/test/scala/other/kafka/TestPurgatoryPerformance.scala
@@ -0,0 +1,275 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the License); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an AS IS BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package kafka
+
+import java.lang.management.ManagementFactory
+import java.util.Random
+import java.util.concurrent._
+
+import joptsimple._
+import kafka.server.{DelayedOperationPurgatory, DelayedOperation}
+import kafka.utils._
+
+import scala.math._
+import scala.collection.JavaConversions._
+
+/**
+ * This is a benchmark test of the purgatory.
+ */
+object TestPurgatoryPerformance {
+
+  def main(args: Array[String]): Unit = {
+val parser = new OptionParser
+val numRequestsOpt = parser.accepts("num", "The number of requests")
+  .withRequiredArg
+  .describedAs("num_requests")
+  .ofType(classOf[java.lang.Double])
+val requestRateOpt = parser.accepts("rate", "The request rate")
+  .withRequiredArg
+  .describedAs("request_per_second")
+  .ofType(classOf[java.lang.Double])
+val requestDataSizeOpt = parser.accepts("size", "The request data size")
+  .withRequiredArg
+  .describedAs("num_bytes")
+  .ofType(classOf[java.lang.Long])
+val numKeysOpt = parser.accepts("keys", "The number of keys")
+  .withRequiredArg
+  .describedAs("num_keys")
+  .ofType(classOf[java.lang.Integer])
+  .defaultsTo(3)
+val timeoutOpt = parser.accepts("timeout", "The request timeout")
+  .withRequiredArg
+  .describedAs("timeout_milliseconds")
+  .ofType(classOf[java.lang.Long])
+val pct75Opt = parser.accepts("pct75", "75th percentile of request latency 
(log-normal distribution)")
+  .withRequiredArg
+  .describedAs("75th_percentile")
+  .ofType(classOf[java.lang.Double])
+val pct50Opt = parser.accepts("pct50", "50th percentile of request latency 
(log-normal distribution)")
+  .withRequiredArg
+  .describedAs("50th_percentile")
+  .ofType(classOf[java.lang.Double])
+val verboseOpt = parser.accepts("verbose", "show additional information")
+  .withRequiredArg
+  .describedAs("true|false")
+  .ofType(classOf[java.lang.Boolean])
+  .defaultsTo(true)
+
+val options = parser.parse(args: _*)
+
+CommandLineUtils.checkRequiredArgs(parser, options, numRequestsOpt, 
requestRateOpt, requestDataSizeOpt, pct75Opt, pct50Opt)
+
+val numRequests = options.valueOf(numRequestsOpt).intValue
+val requestRate = options.valueOf(requestRateOpt).doubleValue
+val requestDataSize = options.valueOf(requestDataSizeOpt).intValue
+val numKeys = options.valueOf(numKeysOpt).intValue
+val timeout = options.valueOf(timeoutOpt).longValue
+val pct75 = options.valueOf(pct75Opt).doubleValue
+val pct50 = options.valueOf(pct50Opt).doubleValue
+val verbose = options.valueOf(verboseOpt).booleanValue
+
+val gcMXBeans = 
ManagementFactory.getGarbageCollectorMXBeans().sortBy(_.getName)
+val osMXBean = try {
+  

kafka git commit: kafka-2016; RollingBounceTest takes long; patched by Ted Malaska; reviewed by Jun Rao

2015-04-02 Thread junrao
Repository: kafka
Updated Branches:
  refs/heads/trunk 619d78eb5 -> ad722531d


kafka-2016; RollingBounceTest takes long; patched by Ted Malaska; reviewed by 
Jun Rao


Project: http://git-wip-us.apache.org/repos/asf/kafka/repo
Commit: http://git-wip-us.apache.org/repos/asf/kafka/commit/ad722531
Tree: http://git-wip-us.apache.org/repos/asf/kafka/tree/ad722531
Diff: http://git-wip-us.apache.org/repos/asf/kafka/diff/ad722531

Branch: refs/heads/trunk
Commit: ad722531daafadbcb27b0c0db0b9fcbb68b25b28
Parents: 619d78e
Author: Ted Malaska ted.mala...@cloudera.com
Authored: Thu Apr 2 18:20:54 2015 -0700
Committer: Jun Rao jun...@gmail.com
Committed: Thu Apr 2 18:20:54 2015 -0700

--
 core/src/test/scala/unit/kafka/integration/RollingBounceTest.scala | 1 -
 core/src/test/scala/unit/kafka/utils/TestUtils.scala   | 1 +
 2 files changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/kafka/blob/ad722531/core/src/test/scala/unit/kafka/integration/RollingBounceTest.scala
--
diff --git a/core/src/test/scala/unit/kafka/integration/RollingBounceTest.scala 
b/core/src/test/scala/unit/kafka/integration/RollingBounceTest.scala
index 4d27e41..f74e716 100644
--- a/core/src/test/scala/unit/kafka/integration/RollingBounceTest.scala
+++ b/core/src/test/scala/unit/kafka/integration/RollingBounceTest.scala
@@ -40,7 +40,6 @@ class RollingBounceTest extends JUnit3Suite with 
ZooKeeperTestHarness {
   val configProps2 = TestUtils.createBrokerConfig(brokerId2, port2)
   val configProps3 = TestUtils.createBrokerConfig(brokerId3, port3)
   val configProps4 = TestUtils.createBrokerConfig(brokerId4, port4)
-  configProps4.put("controlled.shutdown.retry.backoff.ms", "100")
 
   var servers: Seq[KafkaServer] = Seq.empty[KafkaServer]
 

http://git-wip-us.apache.org/repos/asf/kafka/blob/ad722531/core/src/test/scala/unit/kafka/utils/TestUtils.scala
--
diff --git a/core/src/test/scala/unit/kafka/utils/TestUtils.scala 
b/core/src/test/scala/unit/kafka/utils/TestUtils.scala
index 1682a77..bb4daad 100644
--- a/core/src/test/scala/unit/kafka/utils/TestUtils.scala
+++ b/core/src/test/scala/unit/kafka/utils/TestUtils.scala
@@ -167,6 +167,7 @@ object TestUtils extends Logging {
 props.put("controller.socket.timeout.ms", "1500")
 props.put("controlled.shutdown.enable", enableControlledShutdown.toString)
 props.put("delete.topic.enable", enableDeleteTopic.toString)
+props.put("controlled.shutdown.retry.backoff.ms", "100")
 props
   }
 



kafka git commit: kafka-2096; Enable keepalive socket option for broker to prevent socket leak; patched by Allen Wang; reviewed by Jun Rao

2015-04-08 Thread junrao
Repository: kafka
Updated Branches:
  refs/heads/trunk 6880f66c9 -> 194d1fc03


kafka-2096; Enable keepalive socket option for broker to prevent socket leak; 
patched by Allen Wang; reviewed by Jun Rao


Project: http://git-wip-us.apache.org/repos/asf/kafka/repo
Commit: http://git-wip-us.apache.org/repos/asf/kafka/commit/194d1fc0
Tree: http://git-wip-us.apache.org/repos/asf/kafka/tree/194d1fc0
Diff: http://git-wip-us.apache.org/repos/asf/kafka/diff/194d1fc0

Branch: refs/heads/trunk
Commit: 194d1fc030561fd06e39224584b58cc49e2f63e8
Parents: 6880f66
Author: Allen Wang ax_w...@yahoo.com
Authored: Wed Apr 8 17:21:29 2015 -0700
Committer: Jun Rao jun...@gmail.com
Committed: Wed Apr 8 17:21:29 2015 -0700

--
 core/src/main/scala/kafka/network/SocketServer.scala | 1 +
 1 file changed, 1 insertion(+)
--


http://git-wip-us.apache.org/repos/asf/kafka/blob/194d1fc0/core/src/main/scala/kafka/network/SocketServer.scala
--
diff --git a/core/src/main/scala/kafka/network/SocketServer.scala 
b/core/src/main/scala/kafka/network/SocketServer.scala
old mode 100755
new mode 100644
index 8fbea7b..c5fec00
--- a/core/src/main/scala/kafka/network/SocketServer.scala
+++ b/core/src/main/scala/kafka/network/SocketServer.scala
@@ -308,6 +308,7 @@ private[kafka] class Acceptor(val host: String,
   connectionQuotas.inc(socketChannel.socket().getInetAddress)
   socketChannel.configureBlocking(false)
   socketChannel.socket().setTcpNoDelay(true)
+  socketChannel.socket().setKeepAlive(true)
   socketChannel.socket().setSendBufferSize(sendBufferSize)
 
    debug("Accepted connection from %s on %s. sendBufferSize 
[actual|requested]: [%d|%d] recvBufferSize [actual|requested]: [%d|%d]

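SO_KEEPALIVE makes the OS probe idle connections, so a peer that died without 
sending a FIN (for example, a client host that lost power) eventually surfaces 
as an error and the broker-side socket is closed instead of leaking. The same 
option on a plain socket, as a sketch with a hypothetical endpoint:

    import java.net.Socket

    val socket = new Socket("example-broker", 9092) // hypothetical endpoint
    socket.setTcpNoDelay(true)
    socket.setKeepAlive(true) // OS-level probes will detect a silently dead peer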


kafka git commit: kafka-1989; New purgatory design; patched by Yasuhiro Matsuda; reviewed by Guozhang Wang and Jun Rao

2015-04-08 Thread junrao
Repository: kafka
Updated Branches:
  refs/heads/trunk d3aa99c54 -> 6880f66c9


kafka-1989; New purgatory design; patched by Yasuhiro Matsuda; reviewed by 
Guozhang Wang and Jun Rao


Project: http://git-wip-us.apache.org/repos/asf/kafka/repo
Commit: http://git-wip-us.apache.org/repos/asf/kafka/commit/6880f66c
Tree: http://git-wip-us.apache.org/repos/asf/kafka/tree/6880f66c
Diff: http://git-wip-us.apache.org/repos/asf/kafka/diff/6880f66c

Branch: refs/heads/trunk
Commit: 6880f66c97f63b2d1f3750f1753ec8b6094cb8a5
Parents: d3aa99c
Author: Yasuhiro Matsuda yasuhiro.mats...@gmail.com
Authored: Wed Apr 8 15:19:59 2015 -0700
Committer: Jun Rao jun...@gmail.com
Committed: Wed Apr 8 15:19:59 2015 -0700

--
 .../scala/kafka/server/DelayedOperation.scala   | 159 +-
 .../scala/kafka/server/ReplicaManager.scala |   1 -
 .../main/scala/kafka/utils/timer/Timer.scala|  86 ++
 .../scala/kafka/utils/timer/TimerTask.scala |  43 +
 .../scala/kafka/utils/timer/TimerTaskList.scala | 132 +++
 .../scala/kafka/utils/timer/TimingWheel.scala   | 160 +++
 .../kafka/server/DelayedOperationTest.scala |  45 +++---
 .../kafka/utils/timer/TimerTaskListTest.scala   |  95 +++
 .../unit/kafka/utils/timer/TimerTest.scala  | 111 +
 9 files changed, 723 insertions(+), 109 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/kafka/blob/6880f66c/core/src/main/scala/kafka/server/DelayedOperation.scala
--
diff --git a/core/src/main/scala/kafka/server/DelayedOperation.scala 
b/core/src/main/scala/kafka/server/DelayedOperation.scala
index e317676..2ed9b46 100644
--- a/core/src/main/scala/kafka/server/DelayedOperation.scala
+++ b/core/src/main/scala/kafka/server/DelayedOperation.scala
@@ -18,11 +18,15 @@
 package kafka.server
 
 import kafka.utils._
+import kafka.utils.timer._
 import kafka.metrics.KafkaMetricsGroup
 
-import java.util
+import java.util.LinkedList
 import java.util.concurrent._
 import java.util.concurrent.atomic._
+
+import org.apache.kafka.common.utils.Utils
+
 import scala.collection._
 
 import com.yammer.metrics.core.Gauge
@@ -41,7 +45,10 @@ import com.yammer.metrics.core.Gauge
  *
  * A subclass of DelayedOperation needs to provide an implementation of both 
onComplete() and tryComplete().
  */
-abstract class DelayedOperation(delayMs: Long) extends DelayedItem(delayMs) {
+abstract class DelayedOperation(delayMs: Long) extends TimerTask with Logging {
+
+  override val expirationMs = delayMs + System.currentTimeMillis()
+
   private val completed = new AtomicBoolean(false)
 
   /*
@@ -58,6 +65,8 @@ abstract class DelayedOperation(delayMs: Long) extends 
DelayedItem(delayMs) {
*/
   def forceComplete(): Boolean = {
 if (completed.compareAndSet(false, true)) {
+  // cancel the timeout timer
+  cancel()
   onComplete()
   true
 } else {
@@ -71,7 +80,7 @@ abstract class DelayedOperation(delayMs: Long) extends 
DelayedItem(delayMs) {
   def isCompleted(): Boolean = completed.get()
 
   /**
-   * Call-back to execute when a delayed operation expires, but before 
completion.
+   * Call-back to execute when a delayed operation gets expired and hence 
forced to complete.
*/
   def onExpiration(): Unit
 
@@ -89,6 +98,14 @@ abstract class DelayedOperation(delayMs: Long) extends 
DelayedItem(delayMs) {
* This function needs to be defined in subclasses
*/
   def tryComplete(): Boolean
+
+  /*
+   * run() method defines a task that is executed on timeout
+   */
+  override def run(): Unit = {
+if (forceComplete())
+  onExpiration()
+  }
 }
 
 /**
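A minimal sketch of the subclass contract described above; DelayedPing is 
hypothetical, not a Kafka operation. tryComplete() must funnel through 
forceComplete(), whose compare-and-set guarantees that onComplete() runs at 
most once, whether the operation is satisfied first or times out first:

    import java.util.concurrent.atomic.AtomicBoolean

    class DelayedPing(delayMs: Long, acked: AtomicBoolean)
        extends DelayedOperation(delayMs) {
      // runs exactly once, on satisfaction or on timeout
      override def onComplete(): Unit = println("ping done")
      // extra work only when the timeout fired first
      override def onExpiration(): Unit = println("ping expired")
      // check the condition; completion must go through forceComplete()
      override def tryComplete(): Boolean =
        if (acked.get) forceComplete() else false
    }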
@@ -97,11 +114,21 @@ abstract class DelayedOperation(delayMs: Long) extends 
DelayedItem(delayMs) {
class DelayedOperationPurgatory[T <: DelayedOperation](purgatoryName: String, 
brokerId: Int = 0, purgeInterval: Int = 1000)
 extends Logging with KafkaMetricsGroup {
 
+  // timeout timer
+  private[this] val executor = Executors.newFixedThreadPool(1, new 
ThreadFactory() {
+def newThread(runnable: Runnable): Thread =
+  Utils.newThread("executor-" + purgatoryName, runnable, false)
+  })
+  private[this] val timeoutTimer = new Timer(executor)
+
   /* a list of operation watching keys */
   private val watchersForKey = new Pool[Any, Watchers](Some((key: Any) => new 
Watchers))
 
+  // the number of estimated total operations in the purgatory
+  private[this] val estimatedTotalOperations = new AtomicInteger(0)
+
   /* background thread expiring operations that have timed out */
-  private val expirationReaper = new ExpiredOperationReaper
+  private val expirationReaper = new ExpiredOperationReaper()
 
   private val metricsTags = Map("delayedOperation" -> purgatoryName)
 
@@ -153,12 +180,18 @@ class DelayedOperationPurgatory[T : 

kafka git commit: kafka-2118; Cleaner cannot clean after shutdown during replaceSegments; patched by Rajini Sivaram; reviewed by Jun Rao

2015-04-26 Thread junrao
Repository: kafka
Updated Branches:
  refs/heads/trunk ba3e08958 - dbfe8c0a7


kafka-2118; Cleaner cannot clean after shutdown during replaceSegments; patched 
by Rajini Sivaram; reviewed by Jun Rao


Project: http://git-wip-us.apache.org/repos/asf/kafka/repo
Commit: http://git-wip-us.apache.org/repos/asf/kafka/commit/dbfe8c0a
Tree: http://git-wip-us.apache.org/repos/asf/kafka/tree/dbfe8c0a
Diff: http://git-wip-us.apache.org/repos/asf/kafka/diff/dbfe8c0a

Branch: refs/heads/trunk
Commit: dbfe8c0a7dfea65e9f32e6157da1c9a3ce256171
Parents: ba3e089
Author: Rajini Sivaram rajinisiva...@googlemail.com
Authored: Sun Apr 26 19:17:15 2015 -0500
Committer: Jun Rao jun...@gmail.com
Committed: Sun Apr 26 19:17:15 2015 -0500

--
 core/src/main/scala/kafka/log/Log.scala | 55 +--
 .../test/scala/unit/kafka/log/CleanerTest.scala | 99 
 2 files changed, 144 insertions(+), 10 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/kafka/blob/dbfe8c0a/core/src/main/scala/kafka/log/Log.scala
--
diff --git a/core/src/main/scala/kafka/log/Log.scala 
b/core/src/main/scala/kafka/log/Log.scala
old mode 100755
new mode 100644
index 5563f2d..84e7b8f
--- a/core/src/main/scala/kafka/log/Log.scala
+++ b/core/src/main/scala/kafka/log/Log.scala
@@ -122,9 +122,10 @@ class Log(val dir: File,
   private def loadSegments() {
 // create the log directory if it doesn't exist
 dir.mkdirs()
+var swapFiles = Set[File]()
 
 // first do a pass through the files in the log directory and remove any 
temporary files 
-// and complete any interrupted swap operations
+// and find any interrupted swap operations
 for(file <- dir.listFiles if file.isFile) {
   if(!file.canRead)
 throw new IOException("Could not read file " + file)
@@ -134,7 +135,7 @@ class Log(val dir: File,
 file.delete()
   } else if(filename.endsWith(SwapFileSuffix)) {
 // we crashed in the middle of a swap operation, to recover:
-// if a log, swap it in and delete the .index file
+// if a log, delete the .index file, complete the swap operation later
 // if an index just delete it, it will be rebuilt
 val baseName = new File(CoreUtils.replaceSuffix(file.getPath, 
SwapFileSuffix, ""))
 if(baseName.getPath.endsWith(IndexFileSuffix)) {
@@ -143,12 +144,7 @@ class Log(val dir: File,
   // delete the index
   val index = new File(CoreUtils.replaceSuffix(baseName.getPath, 
LogFileSuffix, IndexFileSuffix))
   index.delete()
-  // complete the swap operation
-  val renamed = file.renameTo(baseName)
-  if(renamed)
-info("Found log file %s from interrupted swap operation, 
repairing.".format(file.getPath))
-  else
-throw new KafkaException("Failed to rename file 
%s.".format(file.getPath))
+  swapFiles += file
 }
   }
 }
@@ -180,6 +176,27 @@ class Log(val dir: File,
 segments.put(start, segment)
   }
 }
+
+// Finally, complete any interrupted swap operations. To be crash-safe,
+// log files that are replaced by the swap segment should be renamed to 
.deleted
+// before the swap file is restored as the new segment file.
+for (swapFile <- swapFiles) {
+  val logFile = new File(CoreUtils.replaceSuffix(swapFile.getPath, 
SwapFileSuffix, ""))
+  val fileName = logFile.getName
+  val startOffset = fileName.substring(0, fileName.length - 
LogFileSuffix.length).toLong
+  val indexFile = new File(CoreUtils.replaceSuffix(logFile.getPath, 
LogFileSuffix, IndexFileSuffix) + SwapFileSuffix)
+  val index =  new OffsetIndex(file = indexFile, baseOffset = startOffset, 
maxIndexSize = config.maxIndexSize)
+  val swapSegment = new LogSegment(new FileMessageSet(file = swapFile),
+   index = index,
+   baseOffset = startOffset,
+   indexIntervalBytes = 
config.indexInterval,
+   rollJitterMs = 
config.randomSegmentJitter,
+   time = time)
+  info("Found log file %s from interrupted swap operation, 
repairing.".format(swapFile.getPath))
+  swapSegment.recover(config.maxMessageSize)
+  val oldSegments = logSegments(swapSegment.baseOffset, 
swapSegment.nextOffset)
+  replaceSegments(swapSegment, oldSegments.toSeq, isRecoveredSwapFile = 
true)
+}
 
 if(logSegments.size == 0) {
   // no existing segments, create a new mutable segment beginning at 
offset 0
@@ -748,14 +765,32 @@ class Log(val dir: File,
* Swap a new segment in place and delete one or more existing segments in a 
crash-safe manner. The old segments will
* 

kafka git commit: kafka-2113; TestPurgatoryPerformance does not compile using IBM JDK; patched by Rajini Sivaram; reviewed by Yasuhiro Matsuda and Jun Rao

2015-04-16 Thread junrao
Repository: kafka
Updated Branches:
  refs/heads/trunk bfbd3acbf -> 5397d3c53


kafka-2113; TestPurgatoryPerformance does not compile using IBM JDK; patched by 
Rajini Sivaram; reviewed by Yasuhiro Matsuda and Jun Rao


Project: http://git-wip-us.apache.org/repos/asf/kafka/repo
Commit: http://git-wip-us.apache.org/repos/asf/kafka/commit/5397d3c5
Tree: http://git-wip-us.apache.org/repos/asf/kafka/tree/5397d3c5
Diff: http://git-wip-us.apache.org/repos/asf/kafka/diff/5397d3c5

Branch: refs/heads/trunk
Commit: 5397d3c53e028a745f9d6dbfe8fc84c859656de0
Parents: bfbd3ac
Author: Rajini Sivaram rajinisiva...@googlemail.com
Authored: Thu Apr 16 10:26:49 2015 -0700
Committer: Jun Rao jun...@gmail.com
Committed: Thu Apr 16 10:26:49 2015 -0700

--
 .../other/kafka/TestPurgatoryPerformance.scala  | 24 ++--
 1 file changed, 17 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/kafka/blob/5397d3c5/core/src/test/scala/other/kafka/TestPurgatoryPerformance.scala
--
diff --git a/core/src/test/scala/other/kafka/TestPurgatoryPerformance.scala 
b/core/src/test/scala/other/kafka/TestPurgatoryPerformance.scala
index 962253a..39d6d8a 100644
--- a/core/src/test/scala/other/kafka/TestPurgatoryPerformance.scala
+++ b/core/src/test/scala/other/kafka/TestPurgatoryPerformance.scala
@@ -18,6 +18,7 @@
 package kafka
 
 import java.lang.management.ManagementFactory
+import java.lang.management.OperatingSystemMXBean
 import java.util.Random
 import java.util.concurrent._
 
@@ -84,11 +85,7 @@ object TestPurgatoryPerformance {
 val verbose = options.valueOf(verboseOpt).booleanValue
 
 val gcMXBeans = 
ManagementFactory.getGarbageCollectorMXBeans().sortBy(_.getName)
-val osMXBean = try {
-  
Some(ManagementFactory.getOperatingSystemMXBean().asInstanceOf[com.sun.management.OperatingSystemMXBean])
-} catch {
-  case _: Throwable => None
-}
+val osMXBean = ManagementFactory.getOperatingSystemMXBean
 val latencySamples = new LatencySamples(100, pct75, pct50)
 val intervalSamples = new IntervalSamples(100, requestRate)
 
@@ -97,7 +94,7 @@ object TestPurgatoryPerformance {
 
 val gcNames = gcMXBeans.map(_.getName)
 
-val initialCpuTimeNano = osMXBean.map(x => x.getProcessCpuTime)
+val initialCpuTimeNano = getProcessCpuTimeNanos(osMXBean)
 val latch = new CountDownLatch(numRequests)
 val start = System.currentTimeMillis
 val keys = (0 until numKeys).map(i => "fakeKey%d".format(i))
@@ -142,7 +139,7 @@ object TestPurgatoryPerformance {
 val targetRate = numRequests.toDouble * 1000d / (requestArrivalTime - 
start).toDouble
 val actualRate = numRequests.toDouble * 1000d / (end - start).toDouble
 
-val cpuTime = osMXBean.map(x => (x.getProcessCpuTime - 
initialCpuTimeNano.get) / 1000000L)
+val cpuTime = getProcessCpuTimeNanos(osMXBean).map(x => (x - 
initialCpuTimeNano.get) / 1000000L)
 val gcCounts = gcMXBeans.map(_.getCollectionCount)
 val gcTimes = gcMXBeans.map(_.getCollectionTime)
 
@@ -151,6 +148,19 @@ object TestPurgatoryPerformance {
 purgatory.shutdown()
   }
 
+  // Use JRE-specific class to get process CPU time
+  private def getProcessCpuTimeNanos(osMXBean : OperatingSystemMXBean) = {
+try {
+  
Some(Class.forName("com.sun.management.OperatingSystemMXBean").getMethod("getProcessCpuTime").invoke(osMXBean).asInstanceOf[Long])
+} catch {
+  case _: Throwable => try {
+
Some(Class.forName("com.ibm.lang.management.OperatingSystemMXBean").getMethod("getProcessCpuTimeByNS").invoke(osMXBean).asInstanceOf[Long])
+  } catch {
+case _: Throwable => None
+  }
+}
+  }
+
   // log-normal distribution 
(http://en.wikipedia.org/wiki/Log-normal_distribution)
   //   mu: the mean of the underlying normal distribution (not the mean of 
this log-normal distribution)
   //   sigma: the standard deviation of the underlying normal distribution 
(not the stdev of this log-normal distribution)
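Those two percentiles pin down the underlying normal distribution: the median 
of a log-normal is e^mu, and the 75th percentile lies 0.674 standard deviations 
above the mean of the underlying normal. A sketch of deriving mu and sigma and 
drawing samples (values and names are illustrative, not the tool's own):

    import java.util.Random

    val pct50 = 10.0 // hypothetical median latency (ms)
    val pct75 = 15.0 // hypothetical 75th-percentile latency (ms)
    val mu    = math.log(pct50)                // median of a log-normal is e^mu
    val sigma = (math.log(pct75) - mu) / 0.674 // 0.674 = z-score of the 75th percentile
    val rand  = new Random()
    def sampleLatencyMs(): Double = math.exp(mu + sigma * rand.nextGaussian())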



kafka git commit: kafka-2043; CompressionType is passed in each RecordAccumulator append; patched by Grant Henke; reviewed by Jun Rao

2015-04-06 Thread junrao
Repository: kafka
Updated Branches:
  refs/heads/trunk 9c23d9355 -> 75e1cc8bc


kafka-2043; CompressionType is passed in each RecordAccumulator append; patched 
by Grant Henke; reviewed by Jun Rao


Project: http://git-wip-us.apache.org/repos/asf/kafka/repo
Commit: http://git-wip-us.apache.org/repos/asf/kafka/commit/75e1cc8b
Tree: http://git-wip-us.apache.org/repos/asf/kafka/tree/75e1cc8b
Diff: http://git-wip-us.apache.org/repos/asf/kafka/diff/75e1cc8b

Branch: refs/heads/trunk
Commit: 75e1cc8bc497e6aaa0dd05454d6c817ed0fb5e23
Parents: 9c23d93
Author: Grant Henke granthe...@gmail.com
Authored: Mon Apr 6 13:34:31 2015 -0700
Committer: Jun Rao jun...@gmail.com
Committed: Mon Apr 6 13:34:31 2015 -0700

--
 .../kafka/clients/producer/KafkaProducer.java   |  3 +-
 .../producer/internals/RecordAccumulator.java   |  7 ++--
 .../internals/RecordAccumulatorTest.java| 34 ++--
 .../clients/producer/internals/SenderTest.java  |  8 ++---
 4 files changed, 28 insertions(+), 24 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/kafka/blob/75e1cc8b/clients/src/main/java/org/apache/kafka/clients/producer/KafkaProducer.java
--
diff --git 
a/clients/src/main/java/org/apache/kafka/clients/producer/KafkaProducer.java 
b/clients/src/main/java/org/apache/kafka/clients/producer/KafkaProducer.java
index ab26342..b91e2c5 100644
--- a/clients/src/main/java/org/apache/kafka/clients/producer/KafkaProducer.java
+++ b/clients/src/main/java/org/apache/kafka/clients/producer/KafkaProducer.java
@@ -216,6 +216,7 @@ public class KafkaProducer<K, V> implements Producer<K, V> {
 metricTags.put("client-id", clientId);
 this.accumulator = new 
RecordAccumulator(config.getInt(ProducerConfig.BATCH_SIZE_CONFIG),
  this.totalMemorySize,
+ this.compressionType,
  
config.getLong(ProducerConfig.LINGER_MS_CONFIG),
  retryBackoffMs,
  
config.getBoolean(ProducerConfig.BLOCK_ON_BUFFER_FULL_CONFIG),
@@ -376,7 +377,7 @@ public class KafkaProducerK, V implements ProducerK, V {
 ensureValidRecordSize(serializedSize);
 TopicPartition tp = new TopicPartition(record.topic(), partition);
 log.trace("Sending record {} with callback {} to topic {} 
partition {}", record, callback, record.topic(), partition);
-RecordAccumulator.RecordAppendResult result = 
accumulator.append(tp, serializedKey, serializedValue, compressionType, 
callback);
+RecordAccumulator.RecordAppendResult result = 
accumulator.append(tp, serializedKey, serializedValue, callback);
 if (result.batchIsFull || result.newBatchCreated) {
 log.trace("Waking up the sender since topic {} partition {} is 
either full or getting a new batch", record.topic(), partition);
 this.sender.wakeup();

http://git-wip-us.apache.org/repos/asf/kafka/blob/75e1cc8b/clients/src/main/java/org/apache/kafka/clients/producer/internals/RecordAccumulator.java
--
diff --git 
a/clients/src/main/java/org/apache/kafka/clients/producer/internals/RecordAccumulator.java
 
b/clients/src/main/java/org/apache/kafka/clients/producer/internals/RecordAccumulator.java
index 88b4e4f..0e7ab29 100644
--- 
a/clients/src/main/java/org/apache/kafka/clients/producer/internals/RecordAccumulator.java
+++ 
b/clients/src/main/java/org/apache/kafka/clients/producer/internals/RecordAccumulator.java
@@ -59,6 +59,7 @@ public final class RecordAccumulator {
 private volatile AtomicInteger flushesInProgress;
 private int drainIndex;
 private final int batchSize;
+private final CompressionType compression;
 private final long lingerMs;
 private final long retryBackoffMs;
 private final BufferPool free;
@@ -71,6 +72,7 @@ public final class RecordAccumulator {
  * 
  * @param batchSize The size to use when allocating {@link 
org.apache.kafka.common.record.MemoryRecords} instances
  * @param totalSize The maximum memory the record accumulator can use.
+ * @param compression The compression codec for the records
  * @param lingerMs An artificial delay time to add before declaring a 
records instance that isn't full ready for
  *sending. This allows time for more records to arrive. Setting a 
non-zero lingerMs will trade off some
  *latency for potentially better throughput due to more batching 
(and hence fewer, larger requests).
@@ -84,6 +86,7 @@ public final class RecordAccumulator {
  */
 public RecordAccumulator(int batchSize,
  

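The shape of the change: the compression codec is fixed for the lifetime of a 
producer, so it moves from a per-call argument of append() into the 
accumulator's constructor. A stripped-down sketch of the pattern (hypothetical 
class, not the real RecordAccumulator):

    // before: append(tp, key, value, compressionType, callback) on every call;
    // after: the codec is a constructor field, so call sites pass one fewer argument
    class Accumulator(batchSize: Int, compression: String) {
      def append(key: Array[Byte], value: Array[Byte]): Unit = ()
    }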
kafka git commit: kafka-2099; BrokerEndPoint file, methods and object names should match; patched by Gwen Shapira; reviewed by Sriharsha Chintalapani and Jun Rao

2015-04-06 Thread junrao
Repository: kafka
Updated Branches:
  refs/heads/trunk 75e1cc8bc -> 04fce48e1


kafka-2099; BrokerEndPoint file, methods and object names should match; patched 
by Gwen Shapira; reviewed by Sriharsha Chintalapani and Jun Rao


Project: http://git-wip-us.apache.org/repos/asf/kafka/repo
Commit: http://git-wip-us.apache.org/repos/asf/kafka/commit/04fce48e
Tree: http://git-wip-us.apache.org/repos/asf/kafka/tree/04fce48e
Diff: http://git-wip-us.apache.org/repos/asf/kafka/diff/04fce48e

Branch: refs/heads/trunk
Commit: 04fce48e1af183d5133be1044d27e0f526e94f2a
Parents: 75e1cc8
Author: Gwen Shapira csh...@gmail.com
Authored: Mon Apr 6 18:20:45 2015 -0700
Committer: Jun Rao jun...@gmail.com
Committed: Mon Apr 6 18:20:45 2015 -0700

--
 core/src/main/scala/kafka/admin/AdminUtils.scala|  8 
 .../main/scala/kafka/api/ConsumerMetadataResponse.scala |  8 
 core/src/main/scala/kafka/api/LeaderAndIsrRequest.scala | 10 +-
 core/src/main/scala/kafka/api/TopicMetadata.scala   | 12 ++--
 .../main/scala/kafka/api/TopicMetadataResponse.scala|  6 +++---
 .../main/scala/kafka/api/UpdateMetadataRequest.scala|  4 ++--
 core/src/main/scala/kafka/client/ClientUtils.scala  | 10 +-
 core/src/main/scala/kafka/cluster/Broker.scala  |  6 +++---
 core/src/main/scala/kafka/cluster/BrokerEndPoint.scala  | 12 ++--
 .../scala/kafka/consumer/ConsumerFetcherManager.scala   |  6 +++---
 .../scala/kafka/consumer/ConsumerFetcherThread.scala|  4 ++--
 .../scala/kafka/javaapi/ConsumerMetadataResponse.scala  |  4 ++--
 core/src/main/scala/kafka/javaapi/TopicMetadata.scala   |  8 
 core/src/main/scala/kafka/producer/ProducerPool.scala   |  6 +++---
 .../scala/kafka/server/AbstractFetcherManager.scala |  8 
 .../main/scala/kafka/server/AbstractFetcherThread.scala |  4 ++--
 core/src/main/scala/kafka/server/MetadataCache.scala|  8 
 .../main/scala/kafka/server/ReplicaFetcherManager.scala |  4 ++--
 .../main/scala/kafka/server/ReplicaFetcherThread.scala  |  4 ++--
 core/src/main/scala/kafka/server/ReplicaManager.scala   |  4 ++--
 .../scala/kafka/tools/ReplicaVerificationTool.scala |  6 +++---
 .../main/scala/kafka/tools/SimpleConsumerShell.scala|  6 +++---
 core/src/main/scala/kafka/utils/ZkUtils.scala   |  2 +-
 .../kafka/api/RequestResponseSerializationTest.scala|  4 ++--
 .../scala/unit/kafka/cluster/BrokerEndPointTest.scala   |  4 ++--
 .../unit/kafka/integration/TopicMetadataTest.scala  |  4 ++--
 .../scala/unit/kafka/producer/AsyncProducerTest.scala   |  8 
 27 files changed, 85 insertions(+), 85 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/kafka/blob/04fce48e/core/src/main/scala/kafka/admin/AdminUtils.scala
--
diff --git a/core/src/main/scala/kafka/admin/AdminUtils.scala 
b/core/src/main/scala/kafka/admin/AdminUtils.scala
index 0d3332e..eee80f9 100644
--- a/core/src/main/scala/kafka/admin/AdminUtils.scala
+++ b/core/src/main/scala/kafka/admin/AdminUtils.scala
@@ -18,7 +18,7 @@
 package kafka.admin
 
 import kafka.common._
-import kafka.cluster.{BrokerEndpoint, Broker}
+import kafka.cluster.{BrokerEndPoint, Broker}
 
 import kafka.log.LogConfig
 import kafka.utils._
@@ -356,9 +356,9 @@ object AdminUtils extends Logging {
 val leader = ZkUtils.getLeaderForPartition(zkClient, topic, partition)
 debug("replicas = " + replicas + ", in sync replicas = " + 
inSyncReplicas + ", leader = " + leader)
 
-var leaderInfo: Option[BrokerEndpoint] = None
-var replicaInfo: Seq[BrokerEndpoint] = Nil
-var isrInfo: Seq[BrokerEndpoint] = Nil
+var leaderInfo: Option[BrokerEndPoint] = None
+var replicaInfo: Seq[BrokerEndPoint] = Nil
+var isrInfo: Seq[BrokerEndPoint] = Nil
 try {
   leaderInfo = leader match {
 case Some(l) =>

http://git-wip-us.apache.org/repos/asf/kafka/blob/04fce48e/core/src/main/scala/kafka/api/ConsumerMetadataResponse.scala
--
diff --git a/core/src/main/scala/kafka/api/ConsumerMetadataResponse.scala 
b/core/src/main/scala/kafka/api/ConsumerMetadataResponse.scala
index d2a3d43..ea1c0d0 100644
--- a/core/src/main/scala/kafka/api/ConsumerMetadataResponse.scala
+++ b/core/src/main/scala/kafka/api/ConsumerMetadataResponse.scala
@@ -18,18 +18,18 @@
 package kafka.api
 
 import java.nio.ByteBuffer
-import kafka.cluster.BrokerEndpoint
+import kafka.cluster.BrokerEndPoint
 import kafka.common.ErrorMapping
 
 object ConsumerMetadataResponse {
   val CurrentVersion = 0
 
-  private val NoBrokerEndpointOpt = Some(BrokerEndpoint(id = -1, host = "", 
port = -1))
+  private val NoBrokerEndpointOpt = Some(BrokerEndPoint(id = -1, host = "", 
port = -1))
   
   

[1/3] kafka git commit: kafka-1926; Replace kafka.utils.Utils with o.a.k.common.utils.Utils; patched by Tong Li; reviewed by Jun Rao

2015-04-05 Thread junrao
Repository: kafka
Updated Branches:
  refs/heads/trunk 53f31432a -> 9c23d9355


http://git-wip-us.apache.org/repos/asf/kafka/blob/9c23d935/core/src/test/scala/kafka/tools/TestLogCleaning.scala
--
diff --git a/core/src/test/scala/kafka/tools/TestLogCleaning.scala 
b/core/src/test/scala/kafka/tools/TestLogCleaning.scala
new file mode 100755
index 000..8445894
--- /dev/null
+++ b/core/src/test/scala/kafka/tools/TestLogCleaning.scala
@@ -0,0 +1,311 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the License); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an AS IS BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package kafka.tools
+
+import joptsimple.OptionParser
+import java.util.Properties
+import java.util.Random
+import java.io._
+import kafka.consumer._
+import kafka.serializer._
+import kafka.utils._
+import kafka.log.FileMessageSet
+import kafka.log.Log
+import org.apache.kafka.clients.producer.{ProducerRecord, KafkaProducer, 
ProducerConfig}
+
+/**
+ * This is a torture test that runs against an existing broker. Here is how it 
works:
+ * 
+ * It produces a series of specially formatted messages to one or more 
partitions. Each message it produces
+ * it logs out to a text file. The messages have a limited set of keys, so 
there is duplication in the key space.
+ * 
+ * The broker will clean its log as the test runs.
+ * 
+ * When the specified number of messages have been produced we create a 
consumer and consume all the messages in the topic
+ * and write that out to another text file.
+ * 
+ * Using a stable unix sort we sort both the producer log of what was sent and 
the consumer log of what was retrieved by the message key. 
+ * Then we compare the final message in both logs for each key. If this final 
message is not the same for all keys we
+ * print an error and exit with exit code 1, otherwise we print the size 
reduction and exit with exit code 0.
+ */
+object TestLogCleaning {
+
+  def main(args: Array[String]) {
+val parser = new OptionParser
+val numMessagesOpt = parser.accepts("messages", "The number of messages to 
send or consume.")
+   .withRequiredArg
+   .describedAs("count")
+   .ofType(classOf[java.lang.Long])
+   .defaultsTo(Long.MaxValue)
+val numDupsOpt = parser.accepts("duplicates", "The number of duplicates 
for each key.")
+   .withRequiredArg
+   .describedAs("count")
+   .ofType(classOf[java.lang.Integer])
+   .defaultsTo(5)
+val brokerOpt = parser.accepts("broker", "Url to connect to.")
+  .withRequiredArg
+  .describedAs("url")
+  .ofType(classOf[String])
+val topicsOpt = parser.accepts("topics", "The number of topics to test.")
+  .withRequiredArg
+  .describedAs("count")
+  .ofType(classOf[java.lang.Integer])
+  .defaultsTo(1)
+val percentDeletesOpt = parser.accepts("percent-deletes", "The percentage 
of updates that are deletes.")
+ .withRequiredArg
+ .describedAs("percent")
+ .ofType(classOf[java.lang.Integer])
+ .defaultsTo(0)
+val zkConnectOpt = parser.accepts("zk", "Zk url.")
+ .withRequiredArg
+ .describedAs("url")
+ .ofType(classOf[String])
+val sleepSecsOpt = parser.accepts("sleep", "Time to sleep between 
production and consumption.")
+ .withRequiredArg
+ .describedAs("ms")
+ .ofType(classOf[java.lang.Integer])
+ .defaultsTo(0)
+val dumpOpt = parser.accepts("dump", "Dump the message contents of a topic 
partition that contains test data from this test to standard out.")
+.withRequiredArg
+.describedAs("directory")
+.ofType(classOf[String])
+
+val options = parser.parse(args:_*)

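The validation step reduces to "the last value written for each key must match 
on both sides". A sketch under the assumption that each log line is 
key<TAB>value (helper names are ours, not the tool's):

    // mirror of the sort-and-compare step: keep the last value per key
    def lastValueByKey(lines: Iterator[String]): Map[String, String] =
      lines.map { line =>
        val Array(k, v) = line.split("\t", 2)
        k -> v // toMap keeps the last pair seen for each key
      }.toMap

    def logsAgree(produced: Iterator[String], consumed: Iterator[String]): Boolean =
      lastValueByKey(produced) == lastValueByKey(consumed)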
[2/3] kafka git commit: kafka-1926; Replace kafka.utils.Utils with o.a.k.common.utils.Utils; patched by Tong Li; reviewed by Jun Rao

2015-04-05 Thread junrao
http://git-wip-us.apache.org/repos/asf/kafka/blob/9c23d935/core/src/main/scala/kafka/tools/TestLogCleaning.scala
--
diff --git a/core/src/main/scala/kafka/tools/TestLogCleaning.scala 
b/core/src/main/scala/kafka/tools/TestLogCleaning.scala
deleted file mode 100644
index af496f7..000
--- a/core/src/main/scala/kafka/tools/TestLogCleaning.scala
+++ /dev/null
@@ -1,311 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package kafka.tools
-
-import joptsimple.OptionParser
-import java.util.Properties
-import java.util.Random
-import java.io._
-import kafka.consumer._
-import kafka.serializer._
-import kafka.utils._
-import kafka.log.FileMessageSet
-import kafka.log.Log
-import org.apache.kafka.clients.producer.{ProducerRecord, KafkaProducer, ProducerConfig}
-
-/**
- * This is a torture test that runs against an existing broker. Here is how it works:
- *
- * It produces a series of specially formatted messages to one or more partitions. Each message it produces
- * is also logged to a text file. The messages have a limited set of keys, so there is duplication in the key space.
- *
- * The broker will clean its log as the test runs.
- *
- * When the specified number of messages have been produced, we create a consumer and consume all the messages in the topic
- * and write them out to another text file.
- *
- * Using a stable unix sort, we sort both the producer log of what was sent and the consumer log of what was retrieved by the message key.
- * Then we compare the final message in both logs for each key. If this final message is not the same for all keys, we
- * print an error and exit with exit code 1; otherwise we print the size reduction and exit with exit code 0.
- */
-object TestLogCleaning {
-
-  def main(args: Array[String]) {
-    val parser = new OptionParser
-    val numMessagesOpt = parser.accepts("messages", "The number of messages to send or consume.")
-                               .withRequiredArg
-                               .describedAs("count")
-                               .ofType(classOf[java.lang.Long])
-                               .defaultsTo(Long.MaxValue)
-    val numDupsOpt = parser.accepts("duplicates", "The number of duplicates for each key.")
-                           .withRequiredArg
-                           .describedAs("count")
-                           .ofType(classOf[java.lang.Integer])
-                           .defaultsTo(5)
-    val brokerOpt = parser.accepts("broker", "Url to connect to.")
-                          .withRequiredArg
-                          .describedAs("url")
-                          .ofType(classOf[String])
-    val topicsOpt = parser.accepts("topics", "The number of topics to test.")
-                          .withRequiredArg
-                          .describedAs("count")
-                          .ofType(classOf[java.lang.Integer])
-                          .defaultsTo(1)
-    val percentDeletesOpt = parser.accepts("percent-deletes", "The percentage of updates that are deletes.")
-                                  .withRequiredArg
-                                  .describedAs("percent")
-                                  .ofType(classOf[java.lang.Integer])
-                                  .defaultsTo(0)
-    val zkConnectOpt = parser.accepts("zk", "Zk url.")
-                             .withRequiredArg
-                             .describedAs("url")
-                             .ofType(classOf[String])
-    val sleepSecsOpt = parser.accepts("sleep", "Time to sleep between production and consumption.")
-                             .withRequiredArg
-                             .describedAs("ms")
-                             .ofType(classOf[java.lang.Integer])
-                             .defaultsTo(0)
-    val dumpOpt = parser.accepts("dump", "Dump the message contents of a topic partition that contains test data from this test to standard out.")
-                        .withRequiredArg
-                        .describedAs("directory")
-                        .ofType(classOf[String])
-
-    val options = parser.parse(args:_*)
-
-    if(args.length == 0)
-  

svn commit: r1671461 - in /kafka/site/083: configuration.html upgrade.html

2015-04-05 Thread junrao
Author: junrao
Date: Mon Apr  6 00:38:03 2015
New Revision: 1671461

URL: http://svn.apache.org/r1671461
Log:
update new 0.8.3 config inter.broker.protocol.version and upgrade path

Modified:
kafka/site/083/configuration.html
kafka/site/083/upgrade.html

Modified: kafka/site/083/configuration.html
URL: http://svn.apache.org/viewvc/kafka/site/083/configuration.html?rev=1671461&r1=1671460&r2=1671461&view=diff
==
--- kafka/site/083/configuration.html (original)
+++ kafka/site/083/configuration.html Mon Apr  6 00:38:03 2015
@@ -431,29 +431,34 @@ ZooKeeper also allows you to add a "chro
       <td>5000</td>
       <td>The offset commit will be delayed until this timeout or the required number of replicas have received the offset commit. This is similar to the producer request timeout.</td>
     </tr>
+    <tr>
+      <td>inter.broker.protocol.version</td>
+      <td>0.8.3</td>
+      <td>Version of the protocol brokers will use to communicate with each other. This will default to the current version of the broker, but may need to be set to older versions during a rolling upgrade process. In that scenario, upgraded brokers will use the older version of the protocol and therefore will be able to communicate with brokers that were not yet upgraded. See the <a href="#upgrade">upgrade section</a> for more details.</td>
+    </tr>
 </tbody></table>
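
To make the rolling-upgrade scenario in the new row concrete, an upgraded broker would be pinned to the old protocol in server.properties until the whole cluster runs the new code, for example (a sketch; the exact version string follows kafka.api.ApiVersion and is assumed here to be 0.8.2):

# speak the 0.8.2 protocol until every broker runs 0.8.3 code
inter.broker.protocol.version=0.8.2

Only after all brokers are upgraded would the property be raised to 0.8.3 and the brokers bounced one at a time.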
 
 <p>More details about broker configuration can be found in the scala class <code>kafka.server.KafkaConfig</code>.</p>
 
 <h4><a id="topic-config">Topic-level configuration</a></h4>
-
+
 Configurations pertinent to topics have both a global default as well as an optional per-topic override. If no per-topic configuration is given the global default is used. The override can be set at topic creation time by giving one or more <code>--config</code> options. This example creates a topic named <i>my-topic</i> with a custom max message size and flush rate:
 <pre>
-<b> &gt; bin/kafka-topics.sh --zookeeper localhost:2181 --create --topic my-topic --partitions 1 
+<b> &gt; bin/kafka-topics.sh --zookeeper localhost:2181 --create --topic my-topic --partitions 1
     --replication-factor 1 --config max.message.bytes=64000 --config flush.messages=1</b>
 </pre>
 Overrides can also be changed or set later using the alter topic command. This example updates the max message size for <i>my-topic</i>:
 <pre>
-<b> &gt; bin/kafka-topics.sh --zookeeper localhost:2181 --alter --topic my-topic 
+<b> &gt; bin/kafka-topics.sh --zookeeper localhost:2181 --alter --topic my-topic
     --config max.message.bytes=128000</b>
 </pre>
 
 To remove an override you can do
 <pre>
-<b> &gt; bin/kafka-topics.sh --zookeeper localhost:2181 --alter --topic my-topic 
+<b> &gt; bin/kafka-topics.sh --zookeeper localhost:2181 --alter --topic my-topic
     --deleteConfig max.message.bytes</b>
 </pre>
-
+
 The following are the topic-level configurations. The server's default configuration for this property is given under the Server Default Property heading; setting this default in the server config allows you to change the default given to topics that have no override specified.
 <table class="data-table">
 <tbody>
@@ -745,7 +750,7 @@ Essential configuration properties for t
       <td>request.required.acks</td>
       <td colspan="1">0</td>
       <td>
-    <p>This value controls when a produce request is considered completed. Specifically, how many other brokers must have committed the data to their log and acknowledged this to the leader? Typical values are 
+    <p>This value controls when a produce request is considered completed. Specifically, how many other brokers must have committed the data to their log and acknowledged this to the leader? Typical values are
     <ul>
       <li>0, which means that the producer never waits for an acknowledgement from the broker (the same behavior as 0.7). This option provides the lowest latency but the weakest durability guarantees (some data will be lost when a server fails).
       <li> 1, which means that the producer gets an acknowledgement after the leader replica has received the data. This option provides better durability as the client waits until the server acknowledges the request as successful (only messages that were written to the now-dead leader but not yet replicated will be lost).

Modified: kafka/site/083/upgrade.html
URL: http://svn.apache.org/viewvc/kafka/site/083/upgrade.html?rev=1671461&r1=1671460&r2=1671461&view=diff
==
--- kafka/site/083/upgrade.html (original)
+++ kafka/site/083/upgrade.html Mon Apr  6 00:38:03 2015
@@ -1,5 +1,19 @@
 <h3><a id="upgrade">1.5 Upgrading From Previous Versions</a></h3>
 
+<h4>Upgrading from 0.8.0, 0.8.1.X or 0.8.2.X to 0.8.3.0</h4>
+
+0.8.3.0 has an inter-broker protocol change from previous versions. For a rolling upgrade:
+<ol>
+    <li> Update server.properties file on all brokers and add the following property
[1/3] kafka git commit: kafka-1809; Refactor brokers to allow listening on multiple ports and IPs; patched by Gwen Shapira; reviewed by Joel Koshy and Jun Rao

2015-04-05 Thread junrao
Repository: kafka
Updated Branches:
  refs/heads/trunk 07598ad8a -> 53f31432a


http://git-wip-us.apache.org/repos/asf/kafka/blob/53f31432/core/src/test/scala/unit/kafka/server/KafkaConfigConfigDefTest.scala
--
diff --git 
a/core/src/test/scala/unit/kafka/server/KafkaConfigConfigDefTest.scala 
b/core/src/test/scala/unit/kafka/server/KafkaConfigConfigDefTest.scala
index 150c311..62d1832 100644
--- a/core/src/test/scala/unit/kafka/server/KafkaConfigConfigDefTest.scala
+++ b/core/src/test/scala/unit/kafka/server/KafkaConfigConfigDefTest.scala
@@ -18,9 +18,10 @@ package unit.kafka.server
 
 import java.util.Properties
 
+import kafka.api.ApiVersion
 import kafka.message._
 import kafka.server.{Defaults, KafkaConfig}
-import org.apache.kafka.common.config.ConfigException
+import org.apache.kafka.common.protocol.SecurityProtocol
 import org.junit.{Assert, Test}
 import org.scalatest.junit.JUnit3Suite
 
@@ -172,8 +173,10 @@ class KafkaConfigConfigDefTest extends JUnit3Suite {
 
         case KafkaConfig.PortProp => expected.setProperty(name, "1234")
         case KafkaConfig.HostNameProp => expected.setProperty(name, nextString(10))
+        case KafkaConfig.ListenersProp => expected.setProperty(name, "PLAINTEXT://:9092")
         case KafkaConfig.AdvertisedHostNameProp => expected.setProperty(name, nextString(10))
         case KafkaConfig.AdvertisedPortProp => expected.setProperty(name, "4321")
+        case KafkaConfig.AdvertisedListenersProp => expected.setProperty(name, "PLAINTEXT://:2909")
         case KafkaConfig.SocketRequestMaxBytesProp => expected.setProperty(name, atLeastOneIntProp)
         case KafkaConfig.MaxConnectionsPerIpProp => expected.setProperty(name, atLeastOneIntProp)
         case KafkaConfig.MaxConnectionsPerIpOverridesProp => expected.setProperty(name, "127.0.0.1:2, 127.0.0.2:3")
@@ -204,6 +207,9 @@ class KafkaConfigConfigDefTest extends JUnit3Suite {
         case KafkaConfig.MinInSyncReplicasProp => expected.setProperty(name, atLeastOneIntProp)
         case KafkaConfig.AutoLeaderRebalanceEnableProp => expected.setProperty(name, randFrom("true", "false"))
         case KafkaConfig.UncleanLeaderElectionEnableProp => expected.setProperty(name, randFrom("true", "false"))
+        case KafkaConfig.InterBrokerSecurityProtocolProp => expected.setProperty(name, SecurityProtocol.PLAINTEXT.toString)
+        case KafkaConfig.InterBrokerProtocolVersionProp => expected.setProperty(name, ApiVersion.latestVersion.toString)
+
         case KafkaConfig.ControlledShutdownEnableProp => expected.setProperty(name, randFrom("true", "false"))
         case KafkaConfig.OffsetsLoadBufferSizeProp => expected.setProperty(name, atLeastOneIntProp)
         case KafkaConfig.OffsetsTopicPartitionsProp => expected.setProperty(name, atLeastOneIntProp)

http://git-wip-us.apache.org/repos/asf/kafka/blob/53f31432/core/src/test/scala/unit/kafka/server/KafkaConfigTest.scala
--
diff --git a/core/src/test/scala/unit/kafka/server/KafkaConfigTest.scala 
b/core/src/test/scala/unit/kafka/server/KafkaConfigTest.scala
index 852fa3b..ca46ba9 100644
--- a/core/src/test/scala/unit/kafka/server/KafkaConfigTest.scala
+++ b/core/src/test/scala/unit/kafka/server/KafkaConfigTest.scala
@@ -17,13 +17,15 @@
 
 package kafka.server
 
+import java.util.Properties
+
+import junit.framework.Assert._
+import kafka.api.{ApiVersion, KAFKA_082}
+import kafka.utils.{TestUtils, Utils}
 import org.apache.kafka.common.config.ConfigException
+import org.apache.kafka.common.protocol.SecurityProtocol
 import org.junit.Test
-import junit.framework.Assert._
 import org.scalatest.junit.JUnit3Suite
-import kafka.utils.TestUtils
-import kafka.message.GZIPCompressionCodec
-import kafka.message.NoCompressionCodec
 
 class KafkaConfigTest extends JUnit3Suite {
 
@@ -34,7 +36,6 @@ class KafkaConfigTest extends JUnit3Suite {
 
 val cfg = KafkaConfig.fromProps(props)
 assertEquals(60L * 60L * 1000L, cfg.logRetentionTimeMillis)
-
   }
   
   @Test
@@ -44,7 +45,6 @@ class KafkaConfigTest extends JUnit3Suite {
 
 val cfg = KafkaConfig.fromProps(props)
 assertEquals(30 * 60L * 1000L, cfg.logRetentionTimeMillis)
-
   }
   
   @Test
@@ -54,7 +54,6 @@ class KafkaConfigTest extends JUnit3Suite {
 
 val cfg = KafkaConfig.fromProps(props)
 assertEquals(30 * 60L * 1000L, cfg.logRetentionTimeMillis)
-
   }
   
   @Test
@@ -63,7 +62,6 @@ class KafkaConfigTest extends JUnit3Suite {
 
 val cfg = KafkaConfig.fromProps(props)
 assertEquals(24 * 7 * 60L * 60L * 1000L, cfg.logRetentionTimeMillis)
-
   }
   
   @Test
@@ -74,7 +72,6 @@ class KafkaConfigTest extends JUnit3Suite {
 
 val cfg = KafkaConfig.fromProps(props)
 assertEquals( 30 * 60L * 1000L, cfg.logRetentionTimeMillis)
-
   }
   
   @Test
@@ -85,37 +82,129 @@ class KafkaConfigTest extends JUnit3Suite {
 
 val cfg = KafkaConfig.fromProps(props)
 assertEquals( 30 * 

[2/3] kafka git commit: kafka-1809; Refactor brokers to allow listening on multiple ports and IPs; patched by Gwen Shapira; reviewed by Joel Koshy and Jun Rao

2015-04-05 Thread junrao
http://git-wip-us.apache.org/repos/asf/kafka/blob/53f31432/core/src/main/scala/kafka/server/KafkaConfig.scala
--
diff --git a/core/src/main/scala/kafka/server/KafkaConfig.scala 
b/core/src/main/scala/kafka/server/KafkaConfig.scala
index 6217302..cf1a5a6 100644
--- a/core/src/main/scala/kafka/server/KafkaConfig.scala
+++ b/core/src/main/scala/kafka/server/KafkaConfig.scala
@@ -19,12 +19,14 @@ package kafka.server
 
 import java.util.Properties
 
+import kafka.api.ApiVersion
+import kafka.cluster.EndPoint
 import kafka.consumer.ConsumerConfig
 import kafka.message.{BrokerCompressionCodec, CompressionCodec, Message, 
MessageSet}
 import kafka.utils.Utils
 import org.apache.kafka.common.config.ConfigDef
-
-import scala.collection.{JavaConversions, Map}
+import org.apache.kafka.common.protocol.SecurityProtocol
+import scala.collection.{immutable, JavaConversions, Map}
 
 object Defaults {
   /** * Zookeeper Configuration ***/
@@ -101,6 +103,8 @@ object Defaults {
   val LeaderImbalancePerBrokerPercentage = 10
   val LeaderImbalanceCheckIntervalSeconds = 300
   val UncleanLeaderElectionEnable = true
+  val InterBrokerSecurityProtocol = SecurityProtocol.PLAINTEXT.toString
+  val InterBrokerProtocolVersion = ApiVersion.latestVersion.toString
 
   /** * Controlled shutdown configuration ***/
   val ControlledShutdownMaxRetries = 3
@@ -142,8 +146,10 @@ object KafkaConfig {
   /** * Socket Server Configuration ***/
   val PortProp = "port"
   val HostNameProp = "host.name"
+  val ListenersProp = "listeners"
   val AdvertisedHostNameProp: String = "advertised.host.name"
   val AdvertisedPortProp = "advertised.port"
+  val AdvertisedListenersProp = "advertised.listeners"
   val SocketSendBufferBytesProp = "socket.send.buffer.bytes"
   val SocketReceiveBufferBytesProp = "socket.receive.buffer.bytes"
   val SocketRequestMaxBytesProp = "socket.request.max.bytes"
@@ -207,6 +213,8 @@ object KafkaConfig {
   val LeaderImbalancePerBrokerPercentageProp = "leader.imbalance.per.broker.percentage"
   val LeaderImbalanceCheckIntervalSecondsProp = "leader.imbalance.check.interval.seconds"
   val UncleanLeaderElectionEnableProp = "unclean.leader.election.enable"
+  val InterBrokerSecurityProtocolProp = "security.inter.broker.protocol"
+  val InterBrokerProtocolVersionProp = "inter.broker.protocol.version"
   /** * Controlled shutdown configuration ***/
   val ControlledShutdownMaxRetriesProp = "controlled.shutdown.max.retries"
   val ControlledShutdownRetryBackoffMsProp = "controlled.shutdown.retry.backoff.ms"
@@ -246,6 +254,12 @@ object KafkaConfig {
   /** * Socket Server Configuration ***/
   val PortDoc = "the port to listen and accept connections on"
   val HostNameDoc = "hostname of broker. If this is set, it will only bind to this address. If this is not set, it will bind to all interfaces"
+  val ListenersDoc = "Listener List - Comma-separated list of URIs we will listen on and their protocols.\n" +
+    " Specify hostname as 0.0.0.0 to bind to all interfaces.\n" +
+    " Leave hostname empty to bind to default interface.\n" +
+    " Examples of legal listener lists:\n" +
+    " PLAINTEXT://myhost:9092,TRACE://:9091\n" +
+    " PLAINTEXT://0.0.0.0:9092, TRACE://localhost:9093\n"
   val AdvertisedHostNameDoc = "Hostname to publish to ZooKeeper for clients to use. In IaaS environments, this may " +
     "need to be different from the interface to which the broker binds. If this is not set, " +
     "it will use the value for \"host.name\" if configured. Otherwise " +
@@ -253,6 +267,9 @@ object KafkaConfig {
   val AdvertisedPortDoc = "The port to publish to ZooKeeper for clients to use. In IaaS environments, this may " +
     "need to be different from the port to which the broker binds. If this is not set, " +
     "it will publish the same port that the broker binds to."
+  val AdvertisedListenersDoc = "Listeners to publish to ZooKeeper for clients to use, if different than the listeners above." +
+    " In IaaS environments, this may need to be different from the interface to which the broker binds." +
+    " If this is not set, the value for \"listeners\" will be used."
   val SocketSendBufferBytesDoc = "The SO_SNDBUF buffer of the socket server sockets"
   val SocketReceiveBufferBytesDoc = "The SO_RCVBUF buffer of the socket server sockets"
   val SocketRequestMaxBytesDoc = "The maximum number of bytes in a socket request"
@@ -319,6 +336,10 @@ object KafkaConfig {
   val LeaderImbalancePerBrokerPercentageDoc = "The ratio of leader imbalance allowed per broker. The controller would trigger a leader balance if it goes above this value per broker. The value is specified in percentage."
   val LeaderImbalanceCheckIntervalSecondsDoc = "The frequency with which the partition rebalance check is triggered by the controller"
   val UncleanLeaderElectionEnableDoc = "Indicates whether to enable replicas not
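
Taken together, the two settings documented above would typically appear in server.properties along these lines (a sketch with placeholder host names, not taken from the patch):

# bind on all interfaces, but publish a routable address to ZooKeeper for clients
listeners=PLAINTEXT://0.0.0.0:9092
advertised.listeners=PLAINTEXT://broker1.example.com:9092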

[3/3] kafka git commit: kafka-1809; Refactor brokers to allow listening on multiple ports and IPs; patched by Gwen Shapira; reviewed by Joel Koshy and Jun Rao

2015-04-05 Thread junrao
kafka-1809; Refactor brokers to allow listening on multiple ports and IPs; 
patched by Gwen Shapira; reviewed by Joel Koshy and Jun Rao


Project: http://git-wip-us.apache.org/repos/asf/kafka/repo
Commit: http://git-wip-us.apache.org/repos/asf/kafka/commit/53f31432
Tree: http://git-wip-us.apache.org/repos/asf/kafka/tree/53f31432
Diff: http://git-wip-us.apache.org/repos/asf/kafka/diff/53f31432

Branch: refs/heads/trunk
Commit: 53f31432a0e1da78abf31ad42297790445083072
Parents: 07598ad
Author: Gwen Shapira csh...@gmail.com
Authored: Sun Apr 5 17:21:37 2015 -0700
Committer: Jun Rao jun...@gmail.com
Committed: Sun Apr 5 17:21:37 2015 -0700

--
 .../kafka/clients/producer/ProducerConfig.java  |   1 -
 .../kafka/common/protocol/SecurityProtocol.java |  63 +
 .../org/apache/kafka/common/utils/Utils.java|   4 +-
 .../apache/kafka/common/utils/UtilsTest.java|   4 +-
 config/server.properties|   4 +-
 .../src/main/scala/kafka/admin/AdminUtils.scala |  22 +--
 .../main/scala/kafka/admin/TopicCommand.scala   |   6 +-
 core/src/main/scala/kafka/api/ApiVersion.scala  |  78 +++
 .../kafka/api/ConsumerMetadataResponse.scala|  12 +-
 .../scala/kafka/api/LeaderAndIsrRequest.scala   |  10 +-
 .../main/scala/kafka/api/TopicMetadata.scala|  20 ++-
 .../scala/kafka/api/TopicMetadataResponse.scala |   6 +-
 .../scala/kafka/api/UpdateMetadataRequest.scala |  41 --
 .../main/scala/kafka/client/ClientUtils.scala   |  25 ++--
 core/src/main/scala/kafka/cluster/Broker.scala  | 118 
 .../scala/kafka/cluster/BrokerEndPoint.scala|  67 +
 .../src/main/scala/kafka/cluster/EndPoint.scala |  78 +++
 .../BrokerEndPointNotAvailableException.scala   |  22 +++
 .../kafka/consumer/ConsumerFetcherManager.scala |  10 +-
 .../kafka/consumer/ConsumerFetcherThread.scala  |   4 +-
 .../consumer/ZookeeperConsumerConnector.scala   |   3 +-
 .../controller/ControllerChannelManager.scala   |  13 +-
 .../kafka/controller/KafkaController.scala  |   9 +-
 .../javaapi/ConsumerMetadataResponse.scala  |   5 +-
 .../scala/kafka/javaapi/TopicMetadata.scala |   8 +-
 .../scala/kafka/network/RequestChannel.scala|   5 +-
 .../main/scala/kafka/network/SocketServer.scala |  90 
 .../scala/kafka/producer/ProducerPool.scala |  18 +--
 .../kafka/server/AbstractFetcherManager.scala   |   8 +-
 .../kafka/server/AbstractFetcherThread.scala|   5 +-
 .../src/main/scala/kafka/server/KafkaApis.scala |  12 +-
 .../main/scala/kafka/server/KafkaConfig.scala   | 105 --
 .../scala/kafka/server/KafkaHealthcheck.scala   |  25 ++--
 .../main/scala/kafka/server/KafkaServer.scala   | 114 ---
 .../main/scala/kafka/server/MetadataCache.scala |  25 ++--
 .../kafka/server/ReplicaFetcherManager.scala|   4 +-
 .../kafka/server/ReplicaFetcherThread.scala |   4 +-
 .../scala/kafka/server/ReplicaManager.scala |   4 +-
 .../kafka/tools/ConsumerOffsetChecker.scala |   2 +-
 .../kafka/tools/ReplicaVerificationTool.scala   |   6 +-
 .../scala/kafka/tools/SimpleConsumerShell.scala |  18 ++-
 .../scala/kafka/tools/UpdateOffsetsInZK.scala   |   5 +-
 core/src/main/scala/kafka/utils/Utils.scala |   9 ++
 core/src/main/scala/kafka/utils/ZkUtils.scala   |  38 +++--
 .../kafka/api/ProducerSendTest.scala|   2 -
 .../scala/other/kafka/TestOffsetManager.scala   |   2 +-
 .../test/scala/unit/kafka/KafkaConfigTest.scala |   8 +-
 .../unit/kafka/admin/AddPartitionsTest.scala|   7 +-
 .../api/RequestResponseSerializationTest.scala  |  74 --
 .../unit/kafka/cluster/BrokerEndPointTest.scala | 124 
 .../unit/kafka/integration/FetcherTest.scala|   4 +-
 .../integration/KafkaServerTestHarness.scala|   6 +-
 .../kafka/integration/TopicMetadataTest.scala   |  15 +-
 .../src/test/scala/unit/kafka/log/LogTest.scala |   2 +-
 .../unit/kafka/network/SocketServerTest.scala   |  52 +--
 .../unit/kafka/producer/AsyncProducerTest.scala |   8 +-
 .../unit/kafka/producer/SyncProducerTest.scala  |  27 ++--
 .../unit/kafka/server/AdvertiseBrokerTest.scala |  15 +-
 .../kafka/server/KafkaConfigConfigDefTest.scala |   8 +-
 .../unit/kafka/server/KafkaConfigTest.scala | 140 +++
 .../unit/kafka/server/LeaderElectionTest.scala  |  18 ++-
 .../test/scala/unit/kafka/utils/TestUtils.scala |  12 +-
 system_test/README.txt  |  34 ++---
 .../testcase_0001/testcase_0001_properties.json |   8 +-
 .../testcase_0002/testcase_0002_properties.json |   8 +-
 .../testcase_0003/testcase_0003_properties.json |   8 +-
 .../testcase_0004/testcase_0004_properties.json |   8 +-
 .../testcase_0005/testcase_0005_properties.json |   8 +-
 .../testcase_0006/testcase_0006_properties.json |   8 +-
 .../testcase_0007/testcase_0007_properties.json |   8 +-
 .../testcase_0008/testcase_0008_properties.json |   8 +-
 

[3/3] kafka git commit: kafka-1926; Replace kafka.utils.Utils with o.a.k.common.utils.Utils; patched by Tong Li; reviewed by Jun Rao

2015-04-05 Thread junrao
kafka-1926; Replace kafka.utils.Utils with o.a.k.common.utils.Utils; patched by 
Tong Li; reviewed by Jun Rao


Project: http://git-wip-us.apache.org/repos/asf/kafka/repo
Commit: http://git-wip-us.apache.org/repos/asf/kafka/commit/9c23d935
Tree: http://git-wip-us.apache.org/repos/asf/kafka/tree/9c23d935
Diff: http://git-wip-us.apache.org/repos/asf/kafka/diff/9c23d935

Branch: refs/heads/trunk
Commit: 9c23d93553a33c5d85231193614d192a9945796e
Parents: 53f3143
Author: Tong Li liton...@us.ibm.com
Authored: Sun Apr 5 21:46:11 2015 -0700
Committer: Jun Rao jun...@gmail.com
Committed: Sun Apr 5 21:46:11 2015 -0700

--
 .../org/apache/kafka/common/utils/Utils.java| 124 +++-
 .../apache/kafka/common/utils/UtilsTest.java|  33 +
 core/src/main/scala/kafka/Kafka.scala   |   3 +-
 .../kafka/admin/ConsumerGroupCommand.scala  |   1 +
 .../PreferredReplicaLeaderElectionCommand.scala |   4 +-
 .../kafka/admin/ReassignPartitionsCommand.scala |   9 +-
 .../main/scala/kafka/admin/TopicCommand.scala   |   3 +-
 .../main/scala/kafka/client/ClientUtils.scala   |   4 +-
 core/src/main/scala/kafka/cluster/Broker.scala  |  16 +-
 .../main/scala/kafka/cluster/Partition.scala|   2 +-
 .../kafka/consumer/ConsumerFetcherManager.scala |   2 +-
 .../scala/kafka/consumer/ConsumerIterator.scala |   2 +-
 .../kafka/consumer/PartitionAssignor.scala  |   4 +-
 .../main/scala/kafka/consumer/TopicCount.scala  |   4 +-
 .../consumer/ZookeeperConsumerConnector.scala   |   2 +-
 .../controller/ControllerChannelManager.scala   |   4 +-
 .../kafka/controller/KafkaController.scala  |   2 +-
 .../controller/PartitionStateMachine.scala  |   3 +-
 .../kafka/controller/ReplicaStateMachine.scala  |   2 +-
 .../kafka/controller/TopicDeletionManager.scala |   2 +-
 .../main/scala/kafka/log/FileMessageSet.scala   |   6 +-
 core/src/main/scala/kafka/log/Log.scala |   6 +-
 .../scala/kafka/log/LogCleanerManager.scala |   2 +-
 core/src/main/scala/kafka/log/LogConfig.scala   |   2 +-
 core/src/main/scala/kafka/log/LogManager.scala  |   8 +-
 core/src/main/scala/kafka/log/LogSegment.scala  |   8 +-
 core/src/main/scala/kafka/log/OffsetIndex.scala |   8 +-
 core/src/main/scala/kafka/log/OffsetMap.scala   |   3 +-
 core/src/main/scala/kafka/message/Message.scala |   3 +-
 .../kafka/message/MessageAndMetadata.scala  |   2 +-
 .../scala/kafka/message/MessageWriter.scala |   2 +-
 .../kafka/metrics/KafkaCSVMetricsReporter.scala |   4 +-
 .../kafka/metrics/KafkaMetricsConfig.scala  |   4 +-
 .../kafka/metrics/KafkaMetricsReporter.scala|   6 +-
 .../network/BoundedByteBufferReceive.scala  |   4 +-
 .../main/scala/kafka/network/SocketServer.scala |   1 +
 .../kafka/producer/ByteArrayPartitioner.scala   |   1 +
 .../kafka/producer/DefaultPartitioner.scala |   1 +
 .../main/scala/kafka/producer/Producer.scala|   6 +-
 .../scala/kafka/producer/ProducerConfig.scala   |   6 +-
 .../producer/async/DefaultEventHandler.scala|   9 +-
 .../kafka/server/AbstractFetcherManager.scala   |   5 +-
 .../kafka/server/AbstractFetcherThread.scala|   2 +-
 .../kafka/server/BrokerMetadataCheckpoint.scala |   2 +-
 .../main/scala/kafka/server/KafkaConfig.scala   |  18 +-
 .../kafka/server/KafkaRequestHandler.scala  |   1 +
 .../main/scala/kafka/server/KafkaServer.scala   |  20 +-
 .../main/scala/kafka/server/MetadataCache.scala |   2 +-
 .../main/scala/kafka/server/OffsetManager.scala |   1 +
 .../kafka/server/ZookeeperLeaderElector.scala   |   2 +-
 .../scala/kafka/tools/ConsoleConsumer.scala |   1 +
 .../scala/kafka/tools/DumpLogSegments.scala |   5 +-
 .../scala/kafka/tools/KafkaMigrationTool.java   |   2 +-
 .../main/scala/kafka/tools/MirrorMaker.scala|  14 +-
 .../scala/kafka/tools/SimpleConsumerShell.scala |   1 +
 .../kafka/tools/StateChangeLogMerger.scala  |   4 +-
 .../scala/kafka/tools/TestEndToEndLatency.scala |  92 ---
 .../scala/kafka/tools/TestLogCleaning.scala | 311 -
 .../scala/kafka/tools/UpdateOffsetsInZK.scala   |   4 +-
 core/src/main/scala/kafka/utils/CoreUtils.scala | 347 ++
 core/src/main/scala/kafka/utils/Crc32.java  | 637 ---
 .../main/scala/kafka/utils/KafkaScheduler.scala |   4 +-
 .../scala/kafka/utils/Log4jController.scala |   2 +-
 core/src/main/scala/kafka/utils/Logging.scala   |  10 +-
 core/src/main/scala/kafka/utils/Utils.scala | 619 --
 .../kafka/utils/VerifiableProperties.scala  |   2 +-
 .../kafka/api/ProducerCompressionTest.scala |   4 +-
 .../scala/kafka/tools/TestEndToEndLatency.scala |  91 +++
 .../scala/kafka/tools/TestLogCleaning.scala | 311 +
 .../test/scala/other/kafka/DeleteZKPath.scala   |   3 +-
 .../test/scala/other/kafka/StressTestLog.scala  |   2 +-
 .../scala/other/kafka/TestCrcPerformance.scala  |   3 +-
 .../other/kafka/TestLinearWriteSpeed.scala  |   4 +-
 

kafka git commit: kafka-1992; checkEnoughReplicasReachOffset doesn't need to get requiredAcks; patched by Gwen Shapira; reviewed by Jeff Holoman, Jiangjie Qin and Jun Rao

2015-04-07 Thread junrao
Repository: kafka
Updated Branches:
  refs/heads/trunk b1cc72510 -> d9ab917dc


kafka-1992; checkEnoughReplicasReachOffset doesn't need to get requiredAcks; 
patched by Gwen Shapira; reviewed by Jeff Holoman, Jiangjie Qin and Jun Rao


Project: http://git-wip-us.apache.org/repos/asf/kafka/repo
Commit: http://git-wip-us.apache.org/repos/asf/kafka/commit/d9ab917d
Tree: http://git-wip-us.apache.org/repos/asf/kafka/tree/d9ab917d
Diff: http://git-wip-us.apache.org/repos/asf/kafka/diff/d9ab917d

Branch: refs/heads/trunk
Commit: d9ab917dcc972aa3ef3644aeb4c96094bd9e54d8
Parents: b1cc725
Author: Gwen Shapira csh...@gmail.com
Authored: Tue Apr 7 15:10:47 2015 -0700
Committer: Jun Rao jun...@gmail.com
Committed: Tue Apr 7 15:10:47 2015 -0700

--
 .../main/scala/kafka/cluster/Partition.scala| 34 
 .../scala/kafka/server/DelayedProduce.scala |  4 +--
 2 files changed, 21 insertions(+), 17 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/kafka/blob/d9ab917d/core/src/main/scala/kafka/cluster/Partition.scala
--
diff --git a/core/src/main/scala/kafka/cluster/Partition.scala 
b/core/src/main/scala/kafka/cluster/Partition.scala
index 3fb549c..122b1db 100755
--- a/core/src/main/scala/kafka/cluster/Partition.scala
+++ b/core/src/main/scala/kafka/cluster/Partition.scala
@@ -292,31 +292,37 @@ class Partition(val topic: String,
 }
   }
 
-  def checkEnoughReplicasReachOffset(requiredOffset: Long, requiredAcks: Int): (Boolean, Short) = {
+  /*
+   * Note that this method will only be called if requiredAcks = -1
+   * and we are waiting for all replicas in ISR to be fully caught up to
+   * the (local) leader's offset corresponding to this produce request
+   * before we acknowledge the produce request.
+   */
+  def checkEnoughReplicasReachOffset(requiredOffset: Long): (Boolean, Short) = {
     leaderReplicaIfLocal() match {
       case Some(leaderReplica) =>
         // keep the current immutable replica list reference
         val curInSyncReplicas = inSyncReplicas
         val numAcks = curInSyncReplicas.count(r => {
           if (!r.isLocal)
-            r.logEndOffset.messageOffset >= requiredOffset
+            if (r.logEndOffset.messageOffset >= requiredOffset) {
+              trace("Replica %d of %s-%d received offset %d".format(r.brokerId, topic, partitionId, requiredOffset))
+              true
+            }
+            else
+              false
           else
             true /* also count the local (leader) replica */
         })
-        val minIsr = leaderReplica.log.get.config.minInSyncReplicas
 
-        trace("%d/%d acks satisfied for %s-%d".format(numAcks, requiredAcks, topic, partitionId))
+        trace("%d acks satisfied for %s-%d with acks = -1".format(numAcks, topic, partitionId))
+
+        val minIsr = leaderReplica.log.get.config.minInSyncReplicas
 
-        if (requiredAcks < 0 && leaderReplica.highWatermark.messageOffset >= requiredOffset ) {
+        if (leaderReplica.highWatermark.messageOffset >= requiredOffset ) {
           /*
-          * requiredAcks < 0 means acknowledge after all replicas in ISR
-          * are fully caught up to the (local) leader's offset
-          * corresponding to this produce request.
-          *
-          * minIsr means that the topic is configured not to accept messages
-          * if there are not enough replicas in ISR
-          * in this scenario the request was already appended locally and
-          * then added to the purgatory before the ISR was shrunk
+          * The topic may be configured not to accept messages if there are not enough replicas in ISR
+          * in this scenario the request was already appended locally and then added to the purgatory before the ISR was shrunk
           */
           if (minIsr <= curInSyncReplicas.size) {
             (true, ErrorMapping.NoError)
@@ -412,7 +418,7 @@ class Partition(val topic: String,
       // Avoid writing to leader if there are not enough insync replicas to make it safe
       if (inSyncSize < minIsr && requiredAcks == -1) {
         throw new NotEnoughReplicasException("Number of insync replicas for partition [%s,%d] is [%d], below required minimum [%d]"
-          .format(topic,partitionId,minIsr,inSyncSize))
+          .format(topic, partitionId, inSyncSize, minIsr))
       }
 
       val info = log.append(messages, assignOffsets = true)

http://git-wip-us.apache.org/repos/asf/kafka/blob/d9ab917d/core/src/main/scala/kafka/server/DelayedProduce.scala
--
diff --git a/core/src/main/scala/kafka/server/DelayedProduce.scala 
b/core/src/main/scala/kafka/server/DelayedProduce.scala
index 4d763bf..05078b2 100644
--- a/core/src/main/scala/kafka/server/DelayedProduce.scala
+++ 
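
Condensing the change above: checkEnoughReplicasReachOffset is now reached only for acks = -1 produce requests, acknowledges once the leader's high watermark reaches the request's offset, and still fails the (already appended) request if the ISR shrank below min.insync.replicas in the meantime. A toy model of that decision in Scala, with simplified names rather than the real Partition API:

case class IsrState(highWatermark: Long, isrSize: Int, minIsr: Int)

// None = keep the request in purgatory; Some(error) = complete it.
def ackDecision(state: IsrState, requiredOffset: Long): Option[String] =
  if (state.highWatermark < requiredOffset)
    None                                    // ISR not fully caught up yet
  else if (state.minIsr <= state.isrSize)
    Some("NoError")
  else
    Some("NotEnoughReplicasAfterAppend")    // appended, but ISR shrank below the minimum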

kafka git commit: kafka-1517; Messages is a required argument to Producer Performance Test; patched by Daniel Compton; reviewed by Jun Rao

2015-04-07 Thread junrao
Repository: kafka
Updated Branches:
  refs/heads/trunk 06a26656f -> fef709d51


kafka-1517; Messages is a required argument to Producer Performance Test; 
patched by Daniel Compton; reviewed by Jun Rao


Project: http://git-wip-us.apache.org/repos/asf/kafka/repo
Commit: http://git-wip-us.apache.org/repos/asf/kafka/commit/fef709d5
Tree: http://git-wip-us.apache.org/repos/asf/kafka/tree/fef709d5
Diff: http://git-wip-us.apache.org/repos/asf/kafka/diff/fef709d5

Branch: refs/heads/trunk
Commit: fef709d51ae3d568c6b6a9ff41a16f1969d4ffd6
Parents: 06a2665
Author: Daniel Compton d...@danielcompton.net
Authored: Tue Apr 7 22:18:56 2015 -0700
Committer: Jun Rao jun...@gmail.com
Committed: Tue Apr 7 22:18:56 2015 -0700

--
 core/src/main/scala/kafka/tools/PerfConfig.scala | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/kafka/blob/fef709d5/core/src/main/scala/kafka/tools/PerfConfig.scala
--
diff --git a/core/src/main/scala/kafka/tools/PerfConfig.scala 
b/core/src/main/scala/kafka/tools/PerfConfig.scala
index d073acf..298bb29 100644
--- a/core/src/main/scala/kafka/tools/PerfConfig.scala
+++ b/core/src/main/scala/kafka/tools/PerfConfig.scala
@@ -22,11 +22,10 @@ import joptsimple.OptionParser
 
 class PerfConfig(args: Array[String]) {
   val parser = new OptionParser
-  val numMessagesOpt = parser.accepts("messages", "The number of messages to send or consume")
+  val numMessagesOpt = parser.accepts("messages", "REQUIRED: The number of messages to send or consume")
     .withRequiredArg
     .describedAs("count")
     .ofType(classOf[java.lang.Long])
-    .defaultsTo(Long.MaxValue)
   val reportingIntervalOpt = parser.accepts("reporting-interval", "Interval at which to print progress info.")
     .withRequiredArg
     .describedAs("size")
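
A side effect worth noting: with the Long.MaxValue default removed and the option marked REQUIRED, the perf tools presumably now fail fast with a usage error when --messages is omitted, rather than silently attempting to send or consume an effectively unbounded number of messages.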



[2/3] kafka git commit: kafka-1928; Move kafka.network over to using the network classes in org.apache.kafka.common.network; patched by Gwen Shapira; reviewed by Joel Koshy, Jay Kreps, Jiangjie Qin, G

2015-06-03 Thread junrao
http://git-wip-us.apache.org/repos/asf/kafka/blob/78ba492e/clients/src/main/java/org/apache/kafka/common/network/Selector.java
--
diff --git 
a/clients/src/main/java/org/apache/kafka/common/network/Selector.java 
b/clients/src/main/java/org/apache/kafka/common/network/Selector.java
index 57de058..effb1e6 100644
--- a/clients/src/main/java/org/apache/kafka/common/network/Selector.java
+++ b/clients/src/main/java/org/apache/kafka/common/network/Selector.java
@@ -17,17 +17,8 @@ import java.io.IOException;
 import java.net.ConnectException;
 import java.net.InetSocketAddress;
 import java.net.Socket;
-import java.nio.channels.CancelledKeyException;
-import java.nio.channels.SelectionKey;
-import java.nio.channels.SocketChannel;
-import java.nio.channels.UnresolvedAddressException;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.Iterator;
-import java.util.LinkedHashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
+import java.nio.channels.*;
+import java.util.*;
 import java.util.concurrent.TimeUnit;
 
 import org.apache.kafka.common.KafkaException;
@@ -40,20 +31,21 @@ import org.apache.kafka.common.metrics.stats.Avg;
 import org.apache.kafka.common.metrics.stats.Count;
 import org.apache.kafka.common.metrics.stats.Max;
 import org.apache.kafka.common.metrics.stats.Rate;
+import org.apache.kafka.common.utils.SystemTime;
 import org.apache.kafka.common.utils.Time;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 /**
- * A selector interface for doing non-blocking multi-connection network I/O.
+ * A nioSelector interface for doing non-blocking multi-connection network I/O.
  * <p>
  * This class works with {@link NetworkSend} and {@link NetworkReceive} to transmit size-delimited network requests and
  * responses.
  * <p>
- * A connection can be added to the selector associated with an integer id by doing
+ * A connection can be added to the nioSelector associated with an integer id by doing
  * 
  * <pre>
- * selector.connect(42, new InetSocketAddress(&quot;google.com&quot;, server.port), 64000, 64000);
+ * nioSelector.connect(42, new InetSocketAddress(&quot;google.com&quot;, server.port), 64000, 64000);
  * </pre>
  * 
  * The connect call does not block on the creation of the TCP connection, so the connect method only begins initiating
@@ -64,10 +56,10 @@ import org.slf4j.LoggerFactory;
  * 
  * <pre>
  * List&lt;NetworkRequest&gt; requestsToSend = Arrays.asList(new NetworkRequest(0, myBytes), new NetworkRequest(1, myOtherBytes));
- * selector.poll(TIMEOUT_MS, requestsToSend);
+ * nioSelector.poll(TIMEOUT_MS, requestsToSend);
  * </pre>
  * 
- * The selector maintains several lists that are reset by each call to <code>poll()</code> which are available via
+ * The nioSelector maintains several lists that are reset by each call to <code>poll()</code> which are available via
  * various getters. These are reset by each call to <code>poll()</code>.
  * 
  * This class is not thread safe!
@@ -76,41 +68,59 @@ public class Selector implements Selectable {
 
     private static final Logger log = LoggerFactory.getLogger(Selector.class);
 
-    private final java.nio.channels.Selector selector;
-    private final Map<Integer, SelectionKey> keys;
-    private final List<NetworkSend> completedSends;
+    private final java.nio.channels.Selector nioSelector;
+    private final Map<String, SelectionKey> keys;
+    private final List<Send> completedSends;
     private final List<NetworkReceive> completedReceives;
-    private final List<Integer> disconnected;
-    private final List<Integer> connected;
-    private final List<Integer> failedSends;
+    private final List<String> disconnected;
+    private final List<String> connected;
+    private final List<String> failedSends;
     private final Time time;
     private final SelectorMetrics sensors;
     private final String metricGrpPrefix;
     private final Map<String, String> metricTags;
+    private final Map<String, Long> lruConnections;
+    private final long connectionsMaxIdleNanos;
+    private final int maxReceiveSize;
+    private final boolean metricsPerConnection;
+    private long currentTimeNanos;
+    private long nextIdleCloseCheckTime;
+
 
     /**
-     * Create a new selector
+     * Create a new nioSelector
      */
-    public Selector(Metrics metrics, Time time, String metricGrpPrefix, Map<String, String> metricTags) {
+    public Selector(int maxReceiveSize, long connectionMaxIdleMs, Metrics metrics, Time time, String metricGrpPrefix, Map<String, String> metricTags, boolean metricsPerConnection) {
         try {
-            this.selector = java.nio.channels.Selector.open();
+            this.nioSelector = java.nio.channels.Selector.open();
         } catch (IOException e) {
            throw new KafkaException(e);
        }
+        this.maxReceiveSize = maxReceiveSize;
+        this.connectionsMaxIdleNanos = connectionMaxIdleMs * 1000 * 1000;
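
The new lruConnections and connectionsMaxIdleNanos fields implement idle-connection reaping: connection ids live in an access-ordered map, so only the eldest entry ever needs to be checked against the idle deadline. A rough standalone sketch of that bookkeeping in Scala (behavior inferred from the fields above, not the actual Selector code):

import java.util.{LinkedHashMap => JLinkedHashMap}

// Access-ordered LinkedHashMap: iteration starts at the least recently
// used entry, so the idle check is O(1) per poll.
class IdleReaper(maxIdleNanos: Long) {
  private val lru = new JLinkedHashMap[String, java.lang.Long](16, 0.75f, true)

  def touch(connectionId: String, nowNanos: Long): Unit =
    lru.put(connectionId, nowNanos)

  // Returns the id of a connection to close, if the oldest has idled too long.
  def maybeExpireOldest(nowNanos: Long): Option[String] = {
    val it = lru.entrySet().iterator()
    if (it.hasNext) {
      val eldest = it.next()
      if (nowNanos - eldest.getValue > maxIdleNanos) {
        it.remove()
        return Some(eldest.getKey)
      }
    }
    None
  }
}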
 

[1/3] kafka git commit: kafka-1928; Move kafka.network over to using the network classes in org.apache.kafka.common.network; patched by Gwen Shapira; reviewed by Joel Koshy, Jay Kreps, Jiangjie Qin, G

2015-06-03 Thread junrao
Repository: kafka
Updated Branches:
  refs/heads/trunk d22987f01 -> 78ba492e3


http://git-wip-us.apache.org/repos/asf/kafka/blob/78ba492e/core/src/main/scala/kafka/network/BoundedByteBufferReceive.scala
--
diff --git a/core/src/main/scala/kafka/network/BoundedByteBufferReceive.scala 
b/core/src/main/scala/kafka/network/BoundedByteBufferReceive.scala
deleted file mode 100755
index c0d7726..000
--- a/core/src/main/scala/kafka/network/BoundedByteBufferReceive.scala
+++ /dev/null
@@ -1,90 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- * 
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package kafka.network
-
-import java.nio._
-import java.nio.channels._
-import kafka.utils._
-
-/**
- * Represents a communication between the client and server
- * 
- */
-@nonthreadsafe
-private[kafka] class BoundedByteBufferReceive(val maxSize: Int) extends Receive with Logging {
-  
-  private val sizeBuffer = ByteBuffer.allocate(4)
-  private var contentBuffer: ByteBuffer = null
-  
-  def this() = this(Int.MaxValue)
-  
-  var complete: Boolean = false
-  
-  /**
-   * Get the content buffer for this transmission
-   */
-  def buffer: ByteBuffer = {
-    expectComplete()
-    contentBuffer
-  }
-  
-  /**
-   * Read the bytes in this response from the given channel
-   */
-  def readFrom(channel: ReadableByteChannel): Int = {
-    expectIncomplete()
-    var read = 0
-    // have we read the request size yet?
-    if(sizeBuffer.remaining > 0)
-      read += CoreUtils.read(channel, sizeBuffer)
-    // have we allocated the request buffer yet?
-    if(contentBuffer == null && !sizeBuffer.hasRemaining) {
-      sizeBuffer.rewind()
-      val size = sizeBuffer.getInt()
-      if(size <= 0)
-        throw new InvalidRequestException("%d is not a valid request size.".format(size))
-      if(size > maxSize)
-        throw new InvalidRequestException("Request of length %d is not valid, it is larger than the maximum size of %d bytes.".format(size, maxSize))
-      contentBuffer = byteBufferAllocate(size)
-    }
-    // if we have a buffer read some stuff into it
-    if(contentBuffer != null) {
-      read = CoreUtils.read(channel, contentBuffer)
-      // did we get everything?
-      if(!contentBuffer.hasRemaining) {
-        contentBuffer.rewind()
-        complete = true
-      }
-    }
-    read
-  }
-
-  private def byteBufferAllocate(size: Int): ByteBuffer = {
-    var buffer: ByteBuffer = null
-    try {
-      buffer = ByteBuffer.allocate(size)
-    } catch {
-      case e: OutOfMemoryError =>
-        error("OOME with size " + size, e)
-        throw e
-      case e2: Throwable =>
-        throw e2
-    }
-    buffer
-  }
-}

http://git-wip-us.apache.org/repos/asf/kafka/blob/78ba492e/core/src/main/scala/kafka/network/BoundedByteBufferSend.scala
--
diff --git a/core/src/main/scala/kafka/network/BoundedByteBufferSend.scala 
b/core/src/main/scala/kafka/network/BoundedByteBufferSend.scala
deleted file mode 100644
index b95b73b..000
--- a/core/src/main/scala/kafka/network/BoundedByteBufferSend.scala
+++ /dev/null
@@ -1,71 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- * 
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package kafka.network
-
-import java.nio._
-import java.nio.channels._
-import kafka.utils._
-import kafka.api.RequestOrResponse
-import org.apache.kafka.common.requests.{AbstractRequestResponse, ResponseHeader}
-
-@nonthreadsafe
-private[kafka] class 

[3/3] kafka git commit: kafka-1928; Move kafka.network over to using the network classes in org.apache.kafka.common.network; patched by Gwen Shapira; reviewed by Joel Koshy, Jay Kreps, Jiangjie Qin, G

2015-06-03 Thread junrao
kafka-1928; Move kafka.network over to using the network classes in 
org.apache.kafka.common.network; patched by Gwen Shapira; reviewed by Joel 
Koshy, Jay Kreps, Jiangjie Qin, Guozhang Wang and Jun Rao


Project: http://git-wip-us.apache.org/repos/asf/kafka/repo
Commit: http://git-wip-us.apache.org/repos/asf/kafka/commit/78ba492e
Tree: http://git-wip-us.apache.org/repos/asf/kafka/tree/78ba492e
Diff: http://git-wip-us.apache.org/repos/asf/kafka/diff/78ba492e

Branch: refs/heads/trunk
Commit: 78ba492e3e70fd9db61bc82469371d04a8d6b762
Parents: d22987f
Author: Gwen Shapira csh...@gmail.com
Authored: Wed Jun 3 21:40:35 2015 -0700
Committer: Jun Rao jun...@gmail.com
Committed: Wed Jun 3 21:40:35 2015 -0700

--
 .../kafka/clients/ClusterConnectionStates.java  |  78 ++---
 .../kafka/clients/CommonClientConfigs.java  |   2 +
 .../apache/kafka/clients/InFlightRequests.java  |  18 +-
 .../org/apache/kafka/clients/KafkaClient.java   |   8 +-
 .../org/apache/kafka/clients/NetworkClient.java |  59 ++--
 .../kafka/clients/consumer/ConsumerConfig.java  |  11 +-
 .../kafka/clients/consumer/KafkaConsumer.java   |   3 +-
 .../clients/consumer/internals/Coordinator.java |   4 +-
 .../clients/consumer/internals/Fetcher.java |  10 +-
 .../kafka/clients/producer/KafkaProducer.java   |   3 +-
 .../kafka/clients/producer/ProducerConfig.java  |   6 +-
 .../clients/producer/internals/Sender.java  |   6 +-
 .../main/java/org/apache/kafka/common/Node.java |  10 +
 .../kafka/common/network/ByteBufferReceive.java |  10 +-
 .../kafka/common/network/ByteBufferSend.java|  20 +-
 .../common/network/InvalidReceiveException.java |  30 ++
 .../apache/kafka/common/network/MultiSend.java  | 100 ++
 .../kafka/common/network/NetworkReceive.java|  59 +++-
 .../kafka/common/network/NetworkSend.java   |   2 +-
 .../apache/kafka/common/network/Receive.java|   8 +-
 .../apache/kafka/common/network/Selectable.java |  16 +-
 .../apache/kafka/common/network/Selector.java   | 230 -
 .../org/apache/kafka/common/network/Send.java   |  18 +-
 .../kafka/common/requests/RequestSend.java  |   2 +-
 .../kafka/common/requests/ResponseSend.java |  41 +++
 .../org/apache/kafka/clients/MockClient.java|   6 +-
 .../apache/kafka/clients/NetworkClientTest.java |   8 +-
 .../kafka/common/network/SelectorTest.java  |  86 ++---
 .../org/apache/kafka/test/MockSelector.java |  25 +-
 core/src/main/scala/kafka/Kafka.scala   |  12 +-
 .../kafka/admin/ConsumerGroupCommand.scala  |   2 +-
 .../kafka/api/ConsumerMetadataRequest.scala |   7 +-
 .../kafka/api/ControlledShutdownRequest.scala   |   9 +-
 .../src/main/scala/kafka/api/FetchRequest.scala |   2 +-
 .../main/scala/kafka/api/FetchResponse.scala|  73 ++--
 .../scala/kafka/api/LeaderAndIsrRequest.scala   |  12 +-
 .../scala/kafka/api/OffsetCommitRequest.scala   |  10 +-
 .../scala/kafka/api/OffsetFetchRequest.scala|  15 +-
 .../main/scala/kafka/api/OffsetRequest.scala|   7 +-
 .../main/scala/kafka/api/ProducerRequest.scala  |   7 +-
 core/src/main/scala/kafka/api/RequestKeys.scala |   4 +-
 .../scala/kafka/api/StopReplicaRequest.scala|   4 +-
 .../scala/kafka/api/TopicMetadataRequest.scala  |   8 +-
 .../scala/kafka/api/UpdateMetadataRequest.scala |   4 +-
 .../main/scala/kafka/client/ClientUtils.scala   |   2 +-
 .../scala/kafka/consumer/SimpleConsumer.scala   |  19 +-
 .../consumer/ZookeeperConsumerConnector.scala   |   4 +-
 .../controller/ControllerChannelManager.scala   |  11 +-
 .../kafka/javaapi/TopicMetadataRequest.scala|   7 +-
 .../scala/kafka/network/BlockingChannel.scala   |  21 +-
 .../network/BoundedByteBufferReceive.scala  |  90 -
 .../kafka/network/BoundedByteBufferSend.scala   |  71 
 .../scala/kafka/network/ByteBufferSend.scala|  40 ---
 core/src/main/scala/kafka/network/Handler.scala |   6 +-
 .../scala/kafka/network/RequestChannel.scala|  35 +-
 .../kafka/network/RequestOrResponseSend.scala   |  57 
 .../main/scala/kafka/network/SocketServer.scala | 334 ---
 .../main/scala/kafka/network/Transmission.scala | 122 ---
 .../scala/kafka/producer/SyncProducer.scala |  19 +-
 .../src/main/scala/kafka/server/KafkaApis.scala |  44 +--
 .../main/scala/kafka/server/KafkaConfig.scala   |  56 +++-
 .../main/scala/kafka/server/KafkaServer.scala   |  33 +-
 .../scala/kafka/server/MessageSetSend.scala |  71 
 .../kafka/tools/ConsumerOffsetChecker.scala |   2 +-
 .../scala/other/kafka/TestOffsetManager.scala   |   6 +-
 .../test/scala/unit/kafka/KafkaConfigTest.scala |  17 +-
 .../unit/kafka/network/SocketServerTest.scala   |  41 +--
 .../kafka/server/KafkaConfigConfigDefTest.scala |   8 +
 68 files changed, 1075 insertions(+), 1096 deletions(-)
--



kafka git commit: kafka-2235; LogCleaner offset map overflow; patched by Ivan Simoneko; reviewed by Jun Rao

2015-06-22 Thread junrao
Repository: kafka
Updated Branches:
  refs/heads/trunk cf28f8939 -> dc54055d0


kafka-2235; LogCleaner offset map overflow; patched by Ivan Simoneko; reviewed 
by Jun Rao


Project: http://git-wip-us.apache.org/repos/asf/kafka/repo
Commit: http://git-wip-us.apache.org/repos/asf/kafka/commit/dc54055d
Tree: http://git-wip-us.apache.org/repos/asf/kafka/tree/dc54055d
Diff: http://git-wip-us.apache.org/repos/asf/kafka/diff/dc54055d

Branch: refs/heads/trunk
Commit: dc54055d05742a4a7729a1fe1073c18e3d95cbb2
Parents: cf28f89
Author: Ivan Simoneko simonenko@gmail.com
Authored: Mon Jun 22 09:19:45 2015 -0700
Committer: Jun Rao jun...@gmail.com
Committed: Mon Jun 22 09:19:45 2015 -0700

--
 core/src/main/scala/kafka/log/LogCleaner.scala | 12 +---
 1 file changed, 9 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/kafka/blob/dc54055d/core/src/main/scala/kafka/log/LogCleaner.scala
--
diff --git a/core/src/main/scala/kafka/log/LogCleaner.scala 
b/core/src/main/scala/kafka/log/LogCleaner.scala
index d07a391..b36ea0d 100644
--- a/core/src/main/scala/kafka/log/LogCleaner.scala
+++ b/core/src/main/scala/kafka/log/LogCleaner.scala
@@ -559,11 +559,17 @@ private[log] class Cleaner(val id: Int,
     // but we may be able to fit more (if there is lots of duplication in the dirty section of the log)
     var offset = dirty.head.baseOffset
     require(offset == start, "Last clean offset is %d but segment base offset is %d for log %s.".format(start, offset, log.name))
-    val minStopOffset = (start + map.slots * this.dupBufferLoadFactor).toLong
-    for (segment <- dirty) {
+    val maxDesiredMapSize = (map.slots * this.dupBufferLoadFactor).toInt
+    var full = false
+    for (segment <- dirty if !full) {
       checkDone(log.topicAndPartition)
-      if(segment.baseOffset <= minStopOffset || map.utilization < this.dupBufferLoadFactor)
+      val segmentSize = segment.nextOffset() - segment.baseOffset
+
+      require(segmentSize <= maxDesiredMapSize, "%d messages in segment %s/%s but offset map can fit only %d. You can increase log.cleaner.dedupe.buffer.size or decrease log.cleaner.threads".format(segmentSize, log.name, segment.log.file.getName, maxDesiredMapSize))
+      if (map.size + segmentSize <= maxDesiredMapSize)
         offset = buildOffsetMapForSegment(log.topicAndPartition, segment, map)
+      else
+        full = true
     }
     info("Offset map for log %s complete.".format(log.name))
     offset
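
For a sense of scale, the new bound can be computed directly. An illustrative calculation in Scala, assuming the offset map stores a 16-byte hash plus an 8-byte offset per key (24 bytes per entry) and taking a 128 MB dedupe buffer and a 0.9 load factor as example values:

val dedupeBufferBytes = 128L * 1024 * 1024     // log.cleaner.dedupe.buffer.size (example)
val bytesPerEntry     = 16 + 8                 // assumed hash size + offset size
val maxDesiredMapSize = (dedupeBufferBytes / bytesPerEntry * 0.9).toInt
println(maxDesiredMapSize)                     // roughly 5 million keys per cleaning pass

Any single segment holding more messages than this now fails fast through the require() above, instead of silently overflowing the map as before.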



kafka git commit: kafka-2290; OffsetIndex should open RandomAccessFile consistently; patched by Chris Black; reviewed by Jun Rao

2015-06-22 Thread junrao
Repository: kafka
Updated Branches:
  refs/heads/trunk 16ecf9806 -> cf28f8939


kafka-2290; OffsetIndex should open RandomAccessFile consistently; patched by 
Chris Black; reviewed by Jun Rao


Project: http://git-wip-us.apache.org/repos/asf/kafka/repo
Commit: http://git-wip-us.apache.org/repos/asf/kafka/commit/cf28f893
Tree: http://git-wip-us.apache.org/repos/asf/kafka/tree/cf28f893
Diff: http://git-wip-us.apache.org/repos/asf/kafka/diff/cf28f893

Branch: refs/heads/trunk
Commit: cf28f893963c363bca43747e2f37fad3bb67d033
Parents: 16ecf98
Author: Chris Black chrisbbl...@gmail.om
Authored: Mon Jun 22 08:59:05 2015 -0700
Committer: Jun Rao jun...@gmail.com
Committed: Mon Jun 22 08:59:05 2015 -0700

--
 core/src/main/scala/kafka/log/OffsetIndex.scala | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/kafka/blob/cf28f893/core/src/main/scala/kafka/log/OffsetIndex.scala
--
diff --git a/core/src/main/scala/kafka/log/OffsetIndex.scala 
b/core/src/main/scala/kafka/log/OffsetIndex.scala
index a1082ae..332d5e2 100755
--- a/core/src/main/scala/kafka/log/OffsetIndex.scala
+++ b/core/src/main/scala/kafka/log/OffsetIndex.scala
@@ -274,7 +274,7 @@ class OffsetIndex(@volatile var file: File, val baseOffset: Long, val maxIndexSi
    */
   def resize(newSize: Int) {
     inLock(lock) {
-      val raf = new RandomAccessFile(file, "rws")
+      val raf = new RandomAccessFile(file, "rw")
       val roundedNewSize = roundToExactMultiple(newSize, 8)
       val position = this.mmap.position
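
For context: in Java, RandomAccessFile mode "rw" opens the file for reading and writing, while "rws" additionally forces every update to the file's content and metadata to be written synchronously to the underlying device. The index is written through a memory-mapped buffer, so the synchronous mode added overhead without a guarantee the code relied on, and "rw" is presumably the mode used by the other call sites that the commit summary says this one should be consistent with.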
   



kafka git commit: kafka-1758; corrupt recovery file prevents startup; patched by Manikumar Reddy; reviewed by Neha Narkhede and Jun Rao

2015-06-18 Thread junrao
Repository: kafka
Updated Branches:
  refs/heads/trunk 1c93bb16a -> 19c98cb8e


kafka-1758; corrupt recovery file prevents startup; patched by Manikumar Reddy; 
reviewed by Neha Narkhede and Jun Rao


Project: http://git-wip-us.apache.org/repos/asf/kafka/repo
Commit: http://git-wip-us.apache.org/repos/asf/kafka/commit/19c98cb8
Tree: http://git-wip-us.apache.org/repos/asf/kafka/tree/19c98cb8
Diff: http://git-wip-us.apache.org/repos/asf/kafka/diff/19c98cb8

Branch: refs/heads/trunk
Commit: 19c98cb8ed69d3d6da787dc04e1e88add5f6b9d7
Parents: 1c93bb1
Author: Manikumar Reddy manikumar.re...@gmail.com
Authored: Thu Jun 18 18:50:52 2015 -0700
Committer: Jun Rao jun...@gmail.com
Committed: Thu Jun 18 18:50:52 2015 -0700

--
 core/src/main/scala/kafka/log/LogManager.scala | 10 +-
 1 file changed, 9 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/kafka/blob/19c98cb8/core/src/main/scala/kafka/log/LogManager.scala
--
diff --git a/core/src/main/scala/kafka/log/LogManager.scala 
b/core/src/main/scala/kafka/log/LogManager.scala
index 538fc83..69386c1 100755
--- a/core/src/main/scala/kafka/log/LogManager.scala
+++ b/core/src/main/scala/kafka/log/LogManager.scala
@@ -126,7 +126,15 @@ class LogManager(val logDirs: Array[File],
         brokerState.newState(RecoveringFromUncleanShutdown)
       }
 
-      val recoveryPoints = this.recoveryPointCheckpoints(dir).read
+      var recoveryPoints = Map[TopicAndPartition, Long]()
+      try {
+        recoveryPoints = this.recoveryPointCheckpoints(dir).read
+      } catch {
+        case e: Exception => {
+          warn("Error occurred while reading recovery-point-offset-checkpoint file of directory " + dir, e)
+          warn("Resetting the recovery checkpoint to 0")
+        }
+      }
 
       val jobsForDir = for {
         dirContent <- Option(dir.listFiles).toList



kafka git commit: kafka-2012; Broker should automatically handle corrupt index files; patched by Manikumar Reddy; reviewed by Jun Rao

2015-06-19 Thread junrao
Repository: kafka
Updated Branches:
  refs/heads/trunk ca758252c -> 16ecf9806


kafka-2012; Broker should automatically handle corrupt index files;  patched by 
Manikumar Reddy; reviewed by Jun Rao


Project: http://git-wip-us.apache.org/repos/asf/kafka/repo
Commit: http://git-wip-us.apache.org/repos/asf/kafka/commit/16ecf980
Tree: http://git-wip-us.apache.org/repos/asf/kafka/tree/16ecf980
Diff: http://git-wip-us.apache.org/repos/asf/kafka/diff/16ecf980

Branch: refs/heads/trunk
Commit: 16ecf9806b286d9510103a4426bf0901d7dc8778
Parents: ca75825
Author: Manikumar Reddy manikumar.re...@gmail.com
Authored: Fri Jun 19 09:34:22 2015 -0700
Committer: Jun Rao jun...@gmail.com
Committed: Fri Jun 19 09:34:22 2015 -0700

--
 core/src/main/scala/kafka/log/Log.scala | 18 ---
 .../src/test/scala/unit/kafka/log/LogTest.scala | 33 
 2 files changed, 46 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/kafka/blob/16ecf980/core/src/main/scala/kafka/log/Log.scala
--
diff --git a/core/src/main/scala/kafka/log/Log.scala 
b/core/src/main/scala/kafka/log/Log.scala
index 6b9274d..e5e8007 100644
--- a/core/src/main/scala/kafka/log/Log.scala
+++ b/core/src/main/scala/kafka/log/Log.scala
@@ -169,7 +169,7 @@ class Log(val dir: File,
       } else if(filename.endsWith(LogFileSuffix)) {
         // if its a log file, load the corresponding log segment
         val start = filename.substring(0, filename.length - LogFileSuffix.length).toLong
-        val hasIndex = Log.indexFilename(dir, start).exists
+        val indexFile = Log.indexFilename(dir, start)
         val segment = new LogSegment(dir = dir, 
                                      startOffset = start,
                                      indexIntervalBytes = config.indexInterval, 
@@ -177,7 +177,18 @@ class Log(val dir: File,
                                      rollJitterMs = config.randomSegmentJitter,
                                      time = time,
                                      fileAlreadyExists = true)
-        if(!hasIndex) {
+
+        if(indexFile.exists()) {
+          try {
+              segment.index.sanityCheck()
+          } catch {
+            case e: java.lang.IllegalArgumentException =>
+              warn("Found a corrupted index file, %s, deleting and rebuilding index".format(indexFile.getAbsolutePath))
+              indexFile.delete()
+              segment.recover(config.maxMessageSize)
+          }
+        }
+        else {
           error("Could not find index file corresponding to log file %s, rebuilding index".format(segment.log.file.getAbsolutePath))
           segment.recover(config.maxMessageSize)
         }
@@ -223,9 +234,6 @@ class Log(val dir: File,
       activeSegment.index.resize(config.maxIndexSize)
     }
 
-    // sanity check the index file of every segment to ensure we don't proceed with a corrupt segment
-    for (s <- logSegments)
-      s.index.sanityCheck()
   }
 
   private def updateLogEndOffset(messageOffset: Long) {
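
The shape of the Log.scala change as a standalone sketch: sanity-check an index only when it is loaded, and on failure delete and rebuild it rather than propagate the error. sanityCheck and rebuild are hypothetical stand-ins for OffsetIndex.sanityCheck and LogSegment.recover, and the 8-byte entry size is an assumption made for illustration:

import java.io.File

object IndexSanitySketch {
  // Hypothetical stand-in for OffsetIndex.sanityCheck.
  def sanityCheck(indexFile: File): Unit =
    if (indexFile.length % 8 != 0) // assumption: fixed 8-byte index entries
      throw new IllegalArgumentException(s"${indexFile.getPath} has a partial entry")

  // Hypothetical stand-in for LogSegment.recover.
  def rebuild(indexFile: File): Unit =
    println(s"rebuilding ${indexFile.getPath} by rescanning its log segment")

  def openIndex(indexFile: File): Unit =
    if (indexFile.exists()) {
      try sanityCheck(indexFile)
      catch {
        case _: IllegalArgumentException =>
          indexFile.delete() // drop the corrupt index ...
          rebuild(indexFile) // ... and rebuild it, instead of failing startup
      }
    } else {
      rebuild(indexFile)     // missing index: same recovery path as before
    }
}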

http://git-wip-us.apache.org/repos/asf/kafka/blob/16ecf980/core/src/test/scala/unit/kafka/log/LogTest.scala
--
diff --git a/core/src/test/scala/unit/kafka/log/LogTest.scala 
b/core/src/test/scala/unit/kafka/log/LogTest.scala
index a8e57c2..9e26190 100755
--- a/core/src/test/scala/unit/kafka/log/LogTest.scala
+++ b/core/src/test/scala/unit/kafka/log/LogTest.scala
@@ -489,6 +489,39 @@ class LogTest extends JUnitSuite {
   }
 
   /**
+   * Test that if we have corrupted an index segment it is rebuilt when the log is re-opened
+   */
+  @Test
+  def testCorruptIndexRebuild() {
+    // publish the messages and close the log
+    val numMessages = 200
+    val logProps = new Properties()
+    logProps.put(LogConfig.SegmentBytesProp, 200: java.lang.Integer)
+    logProps.put(LogConfig.IndexIntervalBytesProp, 1: java.lang.Integer)
+
+    val config = LogConfig(logProps)
+    var log = new Log(logDir, config, recoveryPoint = 0L, time.scheduler, time)
+    for(i <- 0 until numMessages)
+      log.append(TestUtils.singleMessageSet(TestUtils.randomBytes(10)))
+    val indexFiles = log.logSegments.map(_.index.file)
+    log.close()
+
+    // corrupt all the index files
+    for(file <- indexFiles) {
+      val bw = new BufferedWriter(new FileWriter(file))
+      bw.write("  ")
+      bw.close()
+    }
+
+    // reopen the log
+    log = new Log(logDir, config, recoveryPoint = 200L, time.scheduler, time)
+    assertEquals("Should have %d messages when log is reopened".format(numMessages), numMessages, log.logEndOffset)
+    for(i <- 0 until numMessages)
+      assertEquals(i, log.read(i, 100, None).messageSet.head.offset)
+    log.close()
+  }
+
+  /**
* Test the Log truncate 

[2/2] kafka git commit: kafka-2249; KafkaConfig does not preserve original Properties; patched by Gwen Shapira; reviewed by Jun Rao

2015-06-18 Thread junrao
kafka-2249; KafkaConfig does not preserve original Properties; patched by Gwen 
Shapira; reviewed by Jun Rao


Project: http://git-wip-us.apache.org/repos/asf/kafka/repo
Commit: http://git-wip-us.apache.org/repos/asf/kafka/commit/5c904074
Tree: http://git-wip-us.apache.org/repos/asf/kafka/tree/5c904074
Diff: http://git-wip-us.apache.org/repos/asf/kafka/diff/5c904074

Branch: refs/heads/trunk
Commit: 5c9040745466945a04ea0315de583ccdab0614ac
Parents: ba86f0a
Author: Gwen Shapira csh...@gmail.com
Authored: Thu Jun 18 14:07:33 2015 -0700
Committer: Jun Rao jun...@gmail.com
Committed: Thu Jun 18 14:07:33 2015 -0700

--
 .../kafka/common/config/AbstractConfig.java |  12 +-
 .../main/scala/kafka/cluster/Partition.scala|   2 +-
 .../kafka/controller/KafkaController.scala  |   4 +-
 .../controller/PartitionLeaderSelector.scala|   2 +-
 core/src/main/scala/kafka/log/LogConfig.scala   | 156 ++---
 core/src/main/scala/kafka/log/LogManager.scala  |   2 +-
 .../src/main/scala/kafka/server/KafkaApis.scala |   4 +-
 .../main/scala/kafka/server/KafkaConfig.scala   | 573 +--
 .../main/scala/kafka/server/KafkaServer.scala   |  55 +-
 .../kafka/server/ReplicaFetcherThread.scala |   4 +-
 .../scala/kafka/server/TopicConfigManager.scala |   5 +-
 core/src/main/scala/kafka/utils/CoreUtils.scala |  26 -
 .../test/scala/other/kafka/StressTestLog.scala  |  10 +-
 .../other/kafka/TestLinearWriteSpeed.scala  |   7 +-
 .../unit/kafka/log/BrokerCompressionTest.scala  |   7 +-
 .../test/scala/unit/kafka/log/CleanerTest.scala |  55 +-
 .../kafka/log/LogCleanerIntegrationTest.scala   |   8 +-
 .../scala/unit/kafka/log/LogConfigTest.scala|  19 +-
 .../scala/unit/kafka/log/LogManagerTest.scala   |  17 +-
 .../src/test/scala/unit/kafka/log/LogTest.scala | 121 +++-
 .../kafka/server/DynamicConfigChangeTest.scala  |  17 +-
 .../kafka/server/KafkaConfigConfigDefTest.scala |  20 +-
 22 files changed, 444 insertions(+), 682 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/kafka/blob/5c904074/clients/src/main/java/org/apache/kafka/common/config/AbstractConfig.java
--
diff --git 
a/clients/src/main/java/org/apache/kafka/common/config/AbstractConfig.java 
b/clients/src/main/java/org/apache/kafka/common/config/AbstractConfig.java
index c4fa058..bae528d 100644
--- a/clients/src/main/java/org/apache/kafka/common/config/AbstractConfig.java
+++ b/clients/src/main/java/org/apache/kafka/common/config/AbstractConfig.java
@@ -57,15 +57,19 @@ public class AbstractConfig {
 return values.get(key);
 }
 
-public int getInt(String key) {
+public Short getShort(String key) {
+return (Short) get(key);
+}
+
+public Integer getInt(String key) {
 return (Integer) get(key);
 }
 
-public long getLong(String key) {
+public Long getLong(String key) {
 return (Long) get(key);
 }
 
-public double getDouble(String key) {
+public Double getDouble(String key) {
 return (Double) get(key);
 }
 
@@ -92,7 +96,7 @@ public class AbstractConfig {
 return keys;
 }
 
-    public Map<String, ?> originals() {
+    public Map<String, Object> originals() {
         Map<String, Object> copy = new HashMap<String, Object>();
         copy.putAll(originals);
         return copy;
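
The getters now return boxed types (Integer, Long, Double, plus the new Short) rather than primitives, so a config with no value can come back as null. A small Scala sketch of why that matters to a caller; the values map is illustrative, not the Kafka class:

object BoxedGetterSketch {
  // Illustrative backing map; "timeout" is deliberately unset.
  val values: Map[String, AnyRef] = Map("port" -> Int.box(9092))

  // Boxed return type, as in the patched getters: a missing value is null.
  def getInt(key: String): Integer =
    values.get(key).orNull.asInstanceOf[Integer]

  def main(args: Array[String]): Unit = {
    println(getInt("port"))    // 9092
    println(getInt("timeout")) // null -- a primitive int could not express this
  }
}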

http://git-wip-us.apache.org/repos/asf/kafka/blob/5c904074/core/src/main/scala/kafka/cluster/Partition.scala
--
diff --git a/core/src/main/scala/kafka/cluster/Partition.scala 
b/core/src/main/scala/kafka/cluster/Partition.scala
index 730a232..6cb6477 100755
--- a/core/src/main/scala/kafka/cluster/Partition.scala
+++ b/core/src/main/scala/kafka/cluster/Partition.scala
@@ -86,7 +86,7 @@ class Partition(val topic: String,
       case Some(replica) => replica
       case None =>
 if (isReplicaLocal(replicaId)) {
-          val config = LogConfig.fromProps(logManager.defaultConfig.toProps, AdminUtils.fetchTopicConfig(zkClient, topic))
+          val config = LogConfig.fromProps(logManager.defaultConfig.originals, AdminUtils.fetchTopicConfig(zkClient, topic))
           val log = logManager.createLog(TopicAndPartition(topic, partitionId), config)
           val checkpoint = replicaManager.highWatermarkCheckpoints(log.dir.getParentFile.getAbsolutePath)
           val offsetMap = checkpoint.read

http://git-wip-us.apache.org/repos/asf/kafka/blob/5c904074/core/src/main/scala/kafka/controller/KafkaController.scala
--
diff --git a/core/src/main/scala/kafka/controller/KafkaController.scala 
b/core/src/main/scala/kafka/controller/KafkaController.scala
index 69bba24..3635057 100755
--- a/core/src/main/scala/kafka/controller/KafkaController.scala

[1/2] kafka git commit: kafka-2249; KafkaConfig does not preserve original Properties; patched by Gwen Shapira; reviewed by Jun Rao

2015-06-18 Thread junrao
Repository: kafka
Updated Branches:
  refs/heads/trunk ba86f0a25 -> 5c9040745


http://git-wip-us.apache.org/repos/asf/kafka/blob/5c904074/core/src/test/scala/unit/kafka/log/LogConfigTest.scala
--
diff --git a/core/src/test/scala/unit/kafka/log/LogConfigTest.scala 
b/core/src/test/scala/unit/kafka/log/LogConfigTest.scala
index 3fd5a53..c31f884 100644
--- a/core/src/test/scala/unit/kafka/log/LogConfigTest.scala
+++ b/core/src/test/scala/unit/kafka/log/LogConfigTest.scala
@@ -26,22 +26,9 @@ import org.scalatest.junit.JUnit3Suite
 class LogConfigTest extends JUnit3Suite {
 
   @Test
-  def testFromPropsDefaults() {
-    val defaults = new Properties()
-    defaults.put(LogConfig.SegmentBytesProp, "4242")
-    val props = new Properties(defaults)
-
-    val config = LogConfig.fromProps(props)
-
-    Assert.assertEquals(4242, config.segmentSize)
-    Assert.assertEquals("LogConfig defaults should be retained", Defaults.MaxMessageSize, config.maxMessageSize)
-    Assert.assertEquals("producer", config.compressionType)
-  }
-
-  @Test
   def testFromPropsEmpty() {
 val p = new Properties()
-val config = LogConfig.fromProps(p)
+val config = LogConfig(p)
 Assert.assertEquals(LogConfig(), config)
   }
 
@@ -62,7 +49,7 @@ class LogConfigTest extends JUnit3Suite {
   }
 })
 
-val actual = LogConfig.fromProps(expected).toProps
+val actual = LogConfig(expected).originals
 Assert.assertEquals(expected, actual)
   }
 
@@ -86,7 +73,7 @@ class LogConfigTest extends JUnit3Suite {
   val props = new Properties
   props.setProperty(name, value.toString)
   intercept[ConfigException] {
-LogConfig.fromProps(props)
+LogConfig(props)
   }
 })
   }

http://git-wip-us.apache.org/repos/asf/kafka/blob/5c904074/core/src/test/scala/unit/kafka/log/LogManagerTest.scala
--
diff --git a/core/src/test/scala/unit/kafka/log/LogManagerTest.scala 
b/core/src/test/scala/unit/kafka/log/LogManagerTest.scala
index 01dfbc4..a13f2be 100755
--- a/core/src/test/scala/unit/kafka/log/LogManagerTest.scala
+++ b/core/src/test/scala/unit/kafka/log/LogManagerTest.scala
@@ -18,6 +18,7 @@
 package kafka.log
 
 import java.io._
+import java.util.Properties
 import junit.framework.Assert._
 import org.junit.Test
 import org.scalatest.junit.JUnit3Suite
@@ -30,7 +31,11 @@ class LogManagerTest extends JUnit3Suite {
   val time: MockTime = new MockTime()
   val maxRollInterval = 100
   val maxLogAgeMs = 10*60*60*1000
-  val logConfig = LogConfig(segmentSize = 1024, maxIndexSize = 4096, retentionMs = maxLogAgeMs)
+  val logProps = new Properties()
+  logProps.put(LogConfig.SegmentBytesProp, 1024: java.lang.Integer)
+  logProps.put(LogConfig.SegmentIndexBytesProp, 4096: java.lang.Integer)
+  logProps.put(LogConfig.RetentionMsProp, maxLogAgeMs: java.lang.Integer)
+  val logConfig = LogConfig(logProps)
   var logDir: File = null
   var logManager: LogManager = null
   val name = "kafka"
@@ -113,8 +118,11 @@ class LogManagerTest extends JUnit3Suite {
   def testCleanupSegmentsToMaintainSize() {
     val setSize = TestUtils.singleMessageSet("test".getBytes()).sizeInBytes
 logManager.shutdown()
+    val logProps = new Properties()
+    logProps.put(LogConfig.SegmentBytesProp, 10 * setSize: java.lang.Integer)
+    logProps.put(LogConfig.RetentionBytesProp, 5L * 10L * setSize + 10L: java.lang.Long)
+    val config = LogConfig.fromProps(logConfig.originals, logProps)
 
-    val config = logConfig.copy(segmentSize = 10 * setSize, retentionSize = 5L * 10L * setSize + 10L)
 logManager = createLogManager()
 logManager.startup
 
@@ -154,7 +162,10 @@ class LogManagerTest extends JUnit3Suite {
   @Test
   def testTimeBasedFlush() {
 logManager.shutdown()
-    val config = logConfig.copy(flushMs = 1000)
+    val logProps = new Properties()
+    logProps.put(LogConfig.FlushMsProp, 1000: java.lang.Integer)
+    val config = LogConfig.fromProps(logConfig.originals, logProps)
+
+
 logManager = createLogManager()
 logManager.startup
 val log = logManager.createLog(TopicAndPartition(name, 0), config)
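
The tests now build configs by layering per-test Properties over the default config's originals, in the spirit of the new LogConfig.fromProps(defaults, overrides). A minimal sketch of that two-layer merge using plain java.util collections (names and values are illustrative):

import java.util.Properties

object LayeredConfigSketch {
  // Merge overrides on top of defaults; later puts win.
  def merge(defaults: java.util.Map[String, AnyRef], overrides: Properties): Properties = {
    val props = new Properties()
    props.putAll(defaults)
    props.putAll(overrides)
    props
  }

  def main(args: Array[String]): Unit = {
    val defaults = new java.util.HashMap[String, AnyRef]()
    defaults.put("segment.bytes", Int.box(1024))
    defaults.put("flush.ms", Long.box(Long.MaxValue))
    val overrides = new Properties()
    overrides.put("flush.ms", Long.box(1000L))
    println(merge(defaults, overrides)) // flush.ms=1000 wins, segment.bytes kept
  }
}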

http://git-wip-us.apache.org/repos/asf/kafka/blob/5c904074/core/src/test/scala/unit/kafka/log/LogTest.scala
--
diff --git a/core/src/test/scala/unit/kafka/log/LogTest.scala 
b/core/src/test/scala/unit/kafka/log/LogTest.scala
index 8e095d6..a8e57c2 100755
--- a/core/src/test/scala/unit/kafka/log/LogTest.scala
+++ b/core/src/test/scala/unit/kafka/log/LogTest.scala
@@ -18,6 +18,7 @@
 package kafka.log
 
 import java.io._
+import java.util.Properties
 import java.util.concurrent.atomic._
 import junit.framework.Assert._
 import org.scalatest.junit.JUnitSuite
@@ -61,9 +62,12 @@ class LogTest extends JUnitSuite {
   def testTimeBasedLogRoll() {
     val set = TestUtils.singleMessageSet("test".getBytes())
 
+

[1/2] kafka git commit: kafka-2168; New consumer poll() can block other calls like position(), commit(), and close() indefinitely; patched by Jason Gustafson; reviewed by Jay Kreps, Ewen Cheslack-Post

2015-06-22 Thread junrao
Repository: kafka
Updated Branches:
  refs/heads/trunk 2270a7537 -> b6d326b08


http://git-wip-us.apache.org/repos/asf/kafka/blob/b6d326b0/clients/src/main/java/org/apache/kafka/clients/consumer/internals/Fetcher.java
--
diff --git 
a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/Fetcher.java
 
b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/Fetcher.java
index 56281ee..695eaf6 100644
--- 
a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/Fetcher.java
+++ 
b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/Fetcher.java
@@ -19,7 +19,6 @@ import org.apache.kafka.clients.KafkaClient;
 import org.apache.kafka.clients.Metadata;
 import org.apache.kafka.clients.RequestCompletionHandler;
 import org.apache.kafka.clients.consumer.ConsumerRecord;
-import org.apache.kafka.clients.consumer.NoOffsetForPartitionException;
 import org.apache.kafka.common.Cluster;
 import org.apache.kafka.common.MetricName;
 import org.apache.kafka.common.Node;
@@ -61,9 +60,6 @@ import java.util.Map;
 public class Fetcher<K, V> {
 
 private static final Logger log = LoggerFactory.getLogger(Fetcher.class);
-private static final long EARLIEST_OFFSET_TIMESTAMP = -2L;
-private static final long LATEST_OFFSET_TIMESTAMP = -1L;
-
 
 private final KafkaClient client;
 
@@ -72,23 +68,19 @@ public class Fetcher<K, V> {
 private final int maxWaitMs;
 private final int fetchSize;
 private final boolean checkCrcs;
-private final long retryBackoffMs;
 private final Metadata metadata;
 private final FetchManagerMetrics sensors;
 private final SubscriptionState subscriptions;
     private final List<PartitionRecords<K, V>> records;
-    private final AutoOffsetResetStrategy offsetResetStrategy;
     private final Deserializer<K> keyDeserializer;
     private final Deserializer<V> valueDeserializer;
 
 
 public Fetcher(KafkaClient client,
-   long retryBackoffMs,
int minBytes,
int maxWaitMs,
int fetchSize,
boolean checkCrcs,
-   String offsetReset,
                    Deserializer<K> keyDeserializer,
                    Deserializer<V> valueDeserializer,
Metadata metadata,
@@ -102,17 +94,16 @@ public class Fetcher<K, V> {
 this.client = client;
 this.metadata = metadata;
 this.subscriptions = subscriptions;
-this.retryBackoffMs = retryBackoffMs;
 this.minBytes = minBytes;
 this.maxWaitMs = maxWaitMs;
 this.fetchSize = fetchSize;
 this.checkCrcs = checkCrcs;
-        this.offsetResetStrategy = AutoOffsetResetStrategy.valueOf(offsetReset);
 
 this.keyDeserializer = keyDeserializer;
 this.valueDeserializer = valueDeserializer;
 
         this.records = new LinkedList<PartitionRecords<K, V>>();
+
 this.sensors = new FetchManagerMetrics(metrics, metricGrpPrefix, 
metricTags);
 }
 
@@ -166,84 +157,76 @@ public class Fetcher<K, V> {
 }
 
     /**
-     * Reset offsets for the given partition using the offset reset strategy.
-     *
-     * @param partition The given partition that needs reset offset
-     * @throws org.apache.kafka.clients.consumer.NoOffsetForPartitionException If no offset reset strategy is defined
-     */
-    public void resetOffset(TopicPartition partition) {
-        long timestamp;
-        if (this.offsetResetStrategy == AutoOffsetResetStrategy.EARLIEST)
-            timestamp = EARLIEST_OFFSET_TIMESTAMP;
-        else if (this.offsetResetStrategy == AutoOffsetResetStrategy.LATEST)
-            timestamp = LATEST_OFFSET_TIMESTAMP;
-        else
-            throw new NoOffsetForPartitionException("No offset is set and no reset policy is defined");
-
-        log.debug("Resetting offset for partition {} to {} offset.", partition, this.offsetResetStrategy.name().toLowerCase());
-        this.subscriptions.seek(partition, offsetBefore(partition, timestamp));
-    }
-
-    /**
  * Fetch a single offset before the given timestamp for the partition.
  *
  * @param topicPartition The partition that needs fetching offset.
  * @param timestamp The timestamp for fetching offset.
-     * @return The offset of the message that is published before the given timestamp
+     * @return A response which can be polled to obtain the corresponding offset.
      */
-    public long offsetBefore(TopicPartition topicPartition, long timestamp) {
-        log.debug("Fetching offsets for partition {}.", topicPartition);
+    public RequestFuture<Long> listOffset(final TopicPartition topicPartition, long timestamp) {
         Map<TopicPartition, ListOffsetRequest.PartitionData> partitions = new HashMap<TopicPartition, ListOffsetRequest.PartitionData>(1);
         partitions.put(topicPartition, new ListOffsetRequest.PartitionData(timestamp, 1));
- 

[2/2] kafka git commit: kafka-2168; New consumer poll() can block other calls like position(), commit(), and close() indefinitely; patched by Jason Gustafson; reviewed by Jay Kreps, Ewen Cheslack-Post

2015-06-22 Thread junrao
kafka-2168; New consumer poll() can block other calls like position(), 
commit(), and close() indefinitely; patched by Jason Gustafson; reviewed by Jay 
Kreps, Ewen Cheslack-Postava, Guozhang Wang and Jun Rao


Project: http://git-wip-us.apache.org/repos/asf/kafka/repo
Commit: http://git-wip-us.apache.org/repos/asf/kafka/commit/b6d326b0
Tree: http://git-wip-us.apache.org/repos/asf/kafka/tree/b6d326b0
Diff: http://git-wip-us.apache.org/repos/asf/kafka/diff/b6d326b0

Branch: refs/heads/trunk
Commit: b6d326b0893e60b350608260fd1bd2542337cb5a
Parents: 2270a75
Author: Jason Gustafson a...@confluent.io
Authored: Tue Jun 23 00:07:19 2015 -0400
Committer: Jun Rao jun...@gmail.com
Committed: Tue Jun 23 00:09:06 2015 -0400

--
 .../apache/kafka/clients/consumer/Consumer.java |   5 +
 .../kafka/clients/consumer/ConsumerRecords.java |   7 +
 .../consumer/ConsumerWakeupException.java   |  20 +
 .../kafka/clients/consumer/KafkaConsumer.java   | 715 +++
 .../kafka/clients/consumer/MockConsumer.java|   9 +-
 .../clients/consumer/OffsetResetStrategy.java   |  17 +
 .../clients/consumer/internals/Coordinator.java | 447 ++--
 .../clients/consumer/internals/Fetcher.java | 159 ++---
 .../clients/consumer/internals/Heartbeat.java   |  10 +
 .../consumer/internals/RequestFuture.java   | 209 ++
 .../consumer/internals/SubscriptionState.java   |  41 +-
 .../org/apache/kafka/common/utils/Utils.java|  15 +
 .../clients/consumer/MockConsumerTest.java  |   2 +-
 .../consumer/internals/CoordinatorTest.java | 148 +++-
 .../clients/consumer/internals/FetcherTest.java |  32 +-
 .../consumer/internals/HeartbeatTest.java   |   9 +
 .../internals/SubscriptionStateTest.java|  19 +-
 .../apache/kafka/common/utils/UtilsTest.java|   8 +
 18 files changed, 1330 insertions(+), 542 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/kafka/blob/b6d326b0/clients/src/main/java/org/apache/kafka/clients/consumer/Consumer.java
--
diff --git 
a/clients/src/main/java/org/apache/kafka/clients/consumer/Consumer.java 
b/clients/src/main/java/org/apache/kafka/clients/consumer/Consumer.java
index 8f587bc..fd98740 100644
--- a/clients/src/main/java/org/apache/kafka/clients/consumer/Consumer.java
+++ b/clients/src/main/java/org/apache/kafka/clients/consumer/Consumer.java
@@ -108,4 +108,9 @@ public interface Consumer<K, V> extends Closeable {
  */
 public void close();
 
+/**
+ * @see KafkaConsumer#wakeup()
+ */
+public void wakeup();
+
 }
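
wakeup() exists so that a thread other than the poller can break a blocked poll(), which then surfaces as a ConsumerWakeupException. A hedged Scala sketch of the shutdown pattern this enables, using the types added in this commit; the loop body and timeout are illustrative:

import java.util.concurrent.atomic.AtomicBoolean
import org.apache.kafka.clients.consumer.{ConsumerWakeupException, KafkaConsumer}

object WakeupSketch {
  val running = new AtomicBoolean(true)

  // Poll loop thread: wakeup() makes a blocked poll() throw ConsumerWakeupException.
  def pollLoop(consumer: KafkaConsumer[Array[Byte], Array[Byte]]): Unit =
    try {
      while (running.get) {
        val records = consumer.poll(1000) // may block; wakeup() interrupts it
        // process records ...
      }
    } catch {
      case e: ConsumerWakeupException =>
        if (running.get) throw e // only swallow the wakeup we asked for
    } finally consumer.close()

  // Any other thread: request shutdown without waiting out the poll timeout.
  def shutdown(consumer: KafkaConsumer[Array[Byte], Array[Byte]]): Unit = {
    running.set(false)
    consumer.wakeup()
  }
}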

http://git-wip-us.apache.org/repos/asf/kafka/blob/b6d326b0/clients/src/main/java/org/apache/kafka/clients/consumer/ConsumerRecords.java
--
diff --git 
a/clients/src/main/java/org/apache/kafka/clients/consumer/ConsumerRecords.java 
b/clients/src/main/java/org/apache/kafka/clients/consumer/ConsumerRecords.java
index 1ca75f8..eb75d2e 100644
--- 
a/clients/src/main/java/org/apache/kafka/clients/consumer/ConsumerRecords.java
+++ 
b/clients/src/main/java/org/apache/kafka/clients/consumer/ConsumerRecords.java
@@ -27,6 +27,8 @@ import java.util.Map;
  * {@link Consumer#poll(long)} operation.
  */
 public class ConsumerRecords<K, V> implements Iterable<ConsumerRecord<K, V>> {
+    public static final ConsumerRecords<Object, Object> EMPTY =
+        new ConsumerRecords<Object, Object>(Collections.EMPTY_MAP);
 
     private final Map<TopicPartition, List<ConsumerRecord<K, V>>> records;
 
@@ -103,4 +105,9 @@ public class ConsumerRecords<K, V> implements Iterable<ConsumerRecord<K, V>> {
         }
     }
 
+    @SuppressWarnings("unchecked")
+    public static <K, V> ConsumerRecords<K, V> empty() {
+        return (ConsumerRecords<K, V>) EMPTY;
+    }
+
+
 }
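
The shared EMPTY instance plus an unchecked-cast accessor is the usual typed-empty-singleton idiom (compare Collections.emptyMap()). The same idiom in a self-contained Scala sketch; Records is an illustrative stand-in, not the Kafka class:

object TypedEmptySingleton {
  final class Records[K, V](val underlying: Map[K, List[V]])

  private val Empty = new Records[Any, Any](Map.empty)

  // One immutable instance serves every type parameterization; the cast is
  // safe because an empty Records can never produce a K or a V.
  def empty[K, V]: Records[K, V] = Empty.asInstanceOf[Records[K, V]]

  def main(args: Array[String]): Unit =
    println(empty[String, Int].underlying.isEmpty) // true, no allocation per call
}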

http://git-wip-us.apache.org/repos/asf/kafka/blob/b6d326b0/clients/src/main/java/org/apache/kafka/clients/consumer/ConsumerWakeupException.java
--
diff --git 
a/clients/src/main/java/org/apache/kafka/clients/consumer/ConsumerWakeupException.java
 
b/clients/src/main/java/org/apache/kafka/clients/consumer/ConsumerWakeupException.java
new file mode 100644
index 000..35f1ec9
--- /dev/null
+++ 
b/clients/src/main/java/org/apache/kafka/clients/consumer/ConsumerWakeupException.java
@@ -0,0 +1,20 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more 
contributor license agreements. See the NOTICE
+ * file distributed with this work for additional information regarding 
copyright ownership. The ASF licenses this file
+ * to You under the Apache License, Version 2.0 (the License); you may not 
use this file except in compliance with the
+ * License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required 

kafka git commit: kafka-2265; creating a topic with large number of partitions takes a long time; patched by Manikumar Reddy; reviewed by Jun Rao

2015-06-18 Thread junrao
Repository: kafka
Updated Branches:
  refs/heads/trunk 5c9040745 -> d9c0ad685


kafka-2265; creating a topic with large number of partitions takes a long time; 
patched by Manikumar Reddy; reviewed by Jun Rao


Project: http://git-wip-us.apache.org/repos/asf/kafka/repo
Commit: http://git-wip-us.apache.org/repos/asf/kafka/commit/d9c0ad68
Tree: http://git-wip-us.apache.org/repos/asf/kafka/tree/d9c0ad68
Diff: http://git-wip-us.apache.org/repos/asf/kafka/diff/d9c0ad68

Branch: refs/heads/trunk
Commit: d9c0ad6855b4871694ddf17c9d4546b021302ee8
Parents: 5c90407
Author: Manikumar Reddy manikumar.re...@gmail.com
Authored: Thu Jun 18 15:59:11 2015 -0700
Committer: Jun Rao jun...@gmail.com
Committed: Thu Jun 18 15:59:11 2015 -0700

--
 .../scala/kafka/controller/PartitionStateMachine.scala | 13 +
 1 file changed, 1 insertion(+), 12 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/kafka/blob/d9c0ad68/core/src/main/scala/kafka/controller/PartitionStateMachine.scala
--
diff --git a/core/src/main/scala/kafka/controller/PartitionStateMachine.scala 
b/core/src/main/scala/kafka/controller/PartitionStateMachine.scala
index 92fd92d..b4e7c88 100755
--- a/core/src/main/scala/kafka/controller/PartitionStateMachine.scala
+++ b/core/src/main/scala/kafka/controller/PartitionStateMachine.scala
@@ -188,7 +188,6 @@ class PartitionStateMachine(controller: KafkaController) 
extends Logging {
         case NewPartition =>
           // pre: partition did not exist before this
           assertValidPreviousStates(topicAndPartition, List(NonExistentPartition), NewPartition)
-          assignReplicasToPartitions(topic, partition)
           partitionState.put(topicAndPartition, NewPartition)
           val assignedReplicas = controllerContext.partitionReplicaAssignment(topicAndPartition).mkString(",")
           stateChangeLogger.trace("Controller %d epoch %d changed partition %s state from %s to %s with assigned replicas %s
@@ -266,17 +265,6 @@ class PartitionStateMachine(controller: KafkaController) 
extends Logging {
   }
 
   /**
-   * Invoked on the NonExistentPartition->NewPartition state transition to update the controller's cache with the
-   * partition's replica assignment.
-   * @param topic The topic of the partition whose replica assignment is to be cached
-   * @param partition The partition whose replica assignment is to be cached
-   */
-  private def assignReplicasToPartitions(topic: String, partition: Int) {
-    val assignedReplicas = ZkUtils.getReplicasForPartition(controllerContext.zkClient, topic, partition)
-    controllerContext.partitionReplicaAssignment += TopicAndPartition(topic, partition) -> assignedReplicas
-  }
-
-  /**
   * Invoked on the NewPartition->OnlinePartition state change. When a partition is in the New state, it does not have
   * a leader and isr path in zookeeper. Once the partition moves to the OnlinePartition state, its leader and isr
   * path gets initialized and it never goes back to the NewPartition state. From here, it can only go to the
@@ -526,6 +514,7 @@ class PartitionStateMachine(controller: KafkaController) 
extends Logging {
       else {
         if (partitionsToBeAdded.size > 0) {
           info("New partitions to be added %s".format(partitionsToBeAdded))
+          controllerContext.partitionReplicaAssignment.++=(partitionsToBeAdded)
           controller.onNewPartitionCreation(partitionsToBeAdded.keySet.toSet)
         }
       }
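
The speedup comes from dropping one ZooKeeper read per new partition in favor of a single bulk update of the controller's cache, since the assignments were already parsed from one ZK node. A Scala sketch of the cost difference; zkRead and its latency are simulated stand-ins:

object BulkAssignmentSketch {
  type TopicAndPartition = (String, Int)

  var cache = Map.empty[TopicAndPartition, Seq[Int]]

  // Before the patch: one simulated ZooKeeper round trip per new partition.
  def zkRead(tp: TopicAndPartition): Seq[Int] = { Thread.sleep(2); Seq(1, 2, 3) }

  def cachePerPartition(tps: Seq[TopicAndPartition]): Unit =
    tps.foreach(tp => cache += tp -> zkRead(tp)) // O(partitions) round trips

  // After the patch: the assignments were already parsed from a single ZK
  // node, so the cache is updated in one bulk merge with no extra ZK traffic.
  def cacheBulk(assignments: Map[TopicAndPartition, Seq[Int]]): Unit =
    cache ++= assignments
}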



kafka git commit: kafka-2234; Partition reassignment of a nonexistent topic prevents future reassignments; patched by Manikumar Reddy; reviewed by Jun Rao

2015-06-18 Thread junrao
Repository: kafka
Updated Branches:
  refs/heads/trunk d9c0ad685 -> 5c2ca30f2


kafka-2234; Partition reassignment of a nonexistent topic prevents future 
reassignments; patched by Manikumar Reddy; reviewed by Jun Rao


Project: http://git-wip-us.apache.org/repos/asf/kafka/repo
Commit: http://git-wip-us.apache.org/repos/asf/kafka/commit/5c2ca30f
Tree: http://git-wip-us.apache.org/repos/asf/kafka/tree/5c2ca30f
Diff: http://git-wip-us.apache.org/repos/asf/kafka/diff/5c2ca30f

Branch: refs/heads/trunk
Commit: 5c2ca30f229c7f39fca65aed6bd45c382aacda77
Parents: d9c0ad6
Author: Manikumar Reddy manikumar.re...@gmail.com
Authored: Thu Jun 18 16:37:25 2015 -0700
Committer: Jun Rao jun...@gmail.com
Committed: Thu Jun 18 16:37:25 2015 -0700

--
 .../scala/kafka/admin/ReassignPartitionsCommand.scala| 11 ---
 1 file changed, 8 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/kafka/blob/5c2ca30f/core/src/main/scala/kafka/admin/ReassignPartitionsCommand.scala
--
diff --git a/core/src/main/scala/kafka/admin/ReassignPartitionsCommand.scala 
b/core/src/main/scala/kafka/admin/ReassignPartitionsCommand.scala
index 912b718..ea34589 100755
--- a/core/src/main/scala/kafka/admin/ReassignPartitionsCommand.scala
+++ b/core/src/main/scala/kafka/admin/ReassignPartitionsCommand.scala
@@ -208,9 +208,14 @@ class ReassignPartitionsCommand(zkClient: ZkClient, 
partitions: collection.Map[T
   def reassignPartitions(): Boolean = {
 try {
       val validPartitions = partitions.filter(p => validatePartition(zkClient, p._1.topic, p._1.partition))
-      val jsonReassignmentData = ZkUtils.getPartitionReassignmentZkData(validPartitions)
-      ZkUtils.createPersistentPath(zkClient, ZkUtils.ReassignPartitionsPath, jsonReassignmentData)
-      true
+      if(validPartitions.isEmpty) {
+        false
+      }
+      else {
+        val jsonReassignmentData = ZkUtils.getPartitionReassignmentZkData(validPartitions)
+        ZkUtils.createPersistentPath(zkClient, ZkUtils.ReassignPartitionsPath, jsonReassignmentData)
+        true
+      }
     } catch {
       case ze: ZkNodeExistsException =>
         val partitionsBeingReassigned = ZkUtils.getPartitionsBeingReassigned(zkClient)
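
With the guard in place, a reassignment whose partitions are all invalid (for example, a nonexistent topic) no longer writes an empty node to /admin/reassign_partitions, which previously blocked all future reassignments. Callers can now branch on the boolean; an illustrative sketch assuming a ReassignPartitionsCommand built as in the test that follows:

// Illustrative caller-side check of the new return value.
if (!reassignPartitionsCommand.reassignPartitions())
  println("no valid partitions to reassign -- nothing written to /admin/reassign_partitions")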



kafka git commit: trivial change to fix unit test failure introduced in kafka-2234

2015-06-18 Thread junrao
Repository: kafka
Updated Branches:
  refs/heads/trunk 5c2ca30f2 -> 1c93bb16a


trivial change to fix unit test failure introduced in kafka-2234


Project: http://git-wip-us.apache.org/repos/asf/kafka/repo
Commit: http://git-wip-us.apache.org/repos/asf/kafka/commit/1c93bb16
Tree: http://git-wip-us.apache.org/repos/asf/kafka/tree/1c93bb16
Diff: http://git-wip-us.apache.org/repos/asf/kafka/diff/1c93bb16

Branch: refs/heads/trunk
Commit: 1c93bb16a91788d3489d4d784c13f1f0fddb6fb7
Parents: 5c2ca30
Author: Jun Rao jun...@gmail.com
Authored: Thu Jun 18 17:00:34 2015 -0700
Committer: Jun Rao jun...@gmail.com
Committed: Thu Jun 18 17:00:34 2015 -0700

--
 core/src/test/scala/unit/kafka/admin/AdminTest.scala | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/kafka/blob/1c93bb16/core/src/test/scala/unit/kafka/admin/AdminTest.scala
--
diff --git a/core/src/test/scala/unit/kafka/admin/AdminTest.scala 
b/core/src/test/scala/unit/kafka/admin/AdminTest.scala
index efb2f8e..252ac81 100755
--- a/core/src/test/scala/unit/kafka/admin/AdminTest.scala
+++ b/core/src/test/scala/unit/kafka/admin/AdminTest.scala
@@ -242,7 +242,7 @@ class AdminTest extends JUnit3Suite with 
ZooKeeperTestHarness with Logging {
 val partitionToBeReassigned = 0
 val topicAndPartition = TopicAndPartition(topic, partitionToBeReassigned)
     val reassignPartitionsCommand = new ReassignPartitionsCommand(zkClient, Map(topicAndPartition -> newReplicas))
-    assertTrue("Partition reassignment failed for test, 0", reassignPartitionsCommand.reassignPartitions())
+    assertFalse("Partition reassignment failed for test, 0", reassignPartitionsCommand.reassignPartitions())
     val reassignedPartitions = ZkUtils.getPartitionsBeingReassigned(zkClient)
     assertFalse("Partition should not be reassigned", reassignedPartitions.contains(topicAndPartition))
 servers.foreach(_.shutdown())



kafka git commit: kafka-2195; Add versionId to AbstractRequest.getErrorResponse and AbstractRequest.getRequest; patched by Andrii Biletskyi; reviewed by Jun Rao

2015-06-16 Thread junrao
Repository: kafka
Updated Branches:
  refs/heads/trunk 20a31a29f -> 54e54f080


kafka-2195; Add versionId to AbstractRequest.getErrorResponse and 
AbstractRequest.getRequest; patched by Andrii Biletskyi; reviewed by Jun Rao


Project: http://git-wip-us.apache.org/repos/asf/kafka/repo
Commit: http://git-wip-us.apache.org/repos/asf/kafka/commit/54e54f08
Tree: http://git-wip-us.apache.org/repos/asf/kafka/tree/54e54f08
Diff: http://git-wip-us.apache.org/repos/asf/kafka/diff/54e54f08

Branch: refs/heads/trunk
Commit: 54e54f08077c9d71a5121e640b55836e6f7f2c9b
Parents: 20a31a2
Author: Andrii Biletskyi andrii.bilets...@stealth.ly
Authored: Tue Jun 16 14:46:48 2015 -0700
Committer: Jun Rao jun...@gmail.com
Committed: Tue Jun 16 14:46:48 2015 -0700

--
 .../kafka/common/requests/AbstractRequest.java  | 24 +++
 .../requests/ConsumerMetadataRequest.java   | 14 +++-
 .../kafka/common/requests/FetchRequest.java | 18 +++--
 .../kafka/common/requests/HeartbeatRequest.java | 20 --
 .../kafka/common/requests/JoinGroupRequest.java | 26 +--
 .../common/requests/JoinGroupResponse.java  |  4 --
 .../common/requests/ListOffsetRequest.java  | 14 +++-
 .../kafka/common/requests/MetadataRequest.java  | 21 +-
 .../kafka/common/requests/MetadataResponse.java | 73 +---
 .../common/requests/OffsetCommitRequest.java| 14 +++-
 .../common/requests/OffsetFetchRequest.java | 20 --
 .../kafka/common/requests/ProduceRequest.java   | 17 +++--
 .../common/requests/RequestResponseTest.java| 23 +++---
 .../scala/kafka/network/RequestChannel.scala|  2 +-
 .../src/main/scala/kafka/server/KafkaApis.scala |  2 +-
 15 files changed, 192 insertions(+), 100 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/kafka/blob/54e54f08/clients/src/main/java/org/apache/kafka/common/requests/AbstractRequest.java
--
diff --git 
a/clients/src/main/java/org/apache/kafka/common/requests/AbstractRequest.java 
b/clients/src/main/java/org/apache/kafka/common/requests/AbstractRequest.java
index 5e5308e..5d3d528 100644
--- 
a/clients/src/main/java/org/apache/kafka/common/requests/AbstractRequest.java
+++ 
b/clients/src/main/java/org/apache/kafka/common/requests/AbstractRequest.java
@@ -28,33 +28,33 @@ public abstract class AbstractRequest extends 
AbstractRequestResponse {
 }
 
 /**
- * Get an error response for a request
+ * Get an error response for a request for a given api version
  */
-    public abstract AbstractRequestResponse getErrorResponse(Throwable e);
+    public abstract AbstractRequestResponse getErrorResponse(int versionId, Throwable e);
 
 /**
  * Factory method for getting a request object based on ApiKey ID and a 
buffer
  */
-    public static AbstractRequest getRequest(int requestId, ByteBuffer buffer) {
+    public static AbstractRequest getRequest(int requestId, int versionId, ByteBuffer buffer) {
 switch (ApiKeys.forId(requestId)) {
 case PRODUCE:
-return ProduceRequest.parse(buffer);
+return ProduceRequest.parse(buffer, versionId);
 case FETCH:
-return FetchRequest.parse(buffer);
+return FetchRequest.parse(buffer, versionId);
 case LIST_OFFSETS:
-return ListOffsetRequest.parse(buffer);
+return ListOffsetRequest.parse(buffer, versionId);
 case METADATA:
-return MetadataRequest.parse(buffer);
+return MetadataRequest.parse(buffer, versionId);
 case OFFSET_COMMIT:
-return OffsetCommitRequest.parse(buffer);
+return OffsetCommitRequest.parse(buffer, versionId);
 case OFFSET_FETCH:
-return OffsetFetchRequest.parse(buffer);
+return OffsetFetchRequest.parse(buffer, versionId);
 case CONSUMER_METADATA:
-return ConsumerMetadataRequest.parse(buffer);
+return ConsumerMetadataRequest.parse(buffer, versionId);
 case JOIN_GROUP:
-return JoinGroupRequest.parse(buffer);
+return JoinGroupRequest.parse(buffer, versionId);
 case HEARTBEAT:
-return HeartbeatRequest.parse(buffer);
+return HeartbeatRequest.parse(buffer, versionId);
 default:
 return null;
 }
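
The factory now threads the header's api version down to each request's parse method, so one wire buffer can be decoded against the right schema revision. A stripped-down Scala sketch of the dispatch shape; the request types, key ids, and wire format here are invented for illustration:

import java.nio.ByteBuffer

object VersionedParseSketch {
  sealed trait Request
  final case class Produce(version: Int) extends Request
  final case class Fetch(version: Int) extends Request

  // Same dispatch shape as AbstractRequest.getRequest: the (apiKey, version)
  // pair from the request header selects both the schema and its revision.
  def parse(apiKey: Int, version: Int, buffer: ByteBuffer): Option[Request] =
    apiKey match {
      case 0 => Some(Produce(version)) // would decode buffer with the version-specific schema
      case 1 => Some(Fetch(version))
      case _ => None                   // unknown api key, mirroring the null return
    }
}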

http://git-wip-us.apache.org/repos/asf/kafka/blob/54e54f08/clients/src/main/java/org/apache/kafka/common/requests/ConsumerMetadataRequest.java
--
diff --git 
a/clients/src/main/java/org/apache/kafka/common/requests/ConsumerMetadataRequest.java
 

kafka git commit: kafka-2272; listeners endpoint parsing fails if the hostname has capital letter; patched by Sriharsha Chintalapani; reviewed by Jun Rao

2015-06-16 Thread junrao
Repository: kafka
Updated Branches:
  refs/heads/trunk 395716ebf -> 28ecea421


kafka-2272; listeners endpoint parsing fails if the hostname has capital 
letter; patched by Sriharsha Chintalapani; reviewed by Jun Rao


Project: http://git-wip-us.apache.org/repos/asf/kafka/repo
Commit: http://git-wip-us.apache.org/repos/asf/kafka/commit/28ecea42
Tree: http://git-wip-us.apache.org/repos/asf/kafka/tree/28ecea42
Diff: http://git-wip-us.apache.org/repos/asf/kafka/diff/28ecea42

Branch: refs/heads/trunk
Commit: 28ecea421794d0c9a1c4f95375ccd1a6dfd8f365
Parents: 395716e
Author: Sriharsha Chintalapani schintalap...@hortonworks.com
Authored: Tue Jun 16 15:25:16 2015 -0700
Committer: Jun Rao jun...@gmail.com
Committed: Tue Jun 16 15:25:16 2015 -0700

--
 core/src/main/scala/kafka/cluster/EndPoint.scala   | 2 +-
 .../src/test/scala/unit/kafka/cluster/BrokerEndPointTest.scala | 6 ++
 2 files changed, 7 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/kafka/blob/28ecea42/core/src/main/scala/kafka/cluster/EndPoint.scala
--
diff --git a/core/src/main/scala/kafka/cluster/EndPoint.scala 
b/core/src/main/scala/kafka/cluster/EndPoint.scala
index e9008e6..76997b5 100644
--- a/core/src/main/scala/kafka/cluster/EndPoint.scala
+++ b/core/src/main/scala/kafka/cluster/EndPoint.scala
@@ -42,7 +42,7 @@ object EndPoint {
* @return
*/
   def createEndPoint(connectionString: String): EndPoint = {
-    val uriParseExp = """^(.*)://\[?([0-9a-z\-.:]*)\]?:(-?[0-9]+)""".r
+    val uriParseExp = """^(.*)://\[?([0-9a-zA-Z\-.:]*)\]?:(-?[0-9]+)""".r
     connectionString match {
       case uriParseExp(protocol, "", port) => new EndPoint(null, port.toInt, SecurityProtocol.valueOf(protocol))
       case uriParseExp(protocol, host, port) => new EndPoint(host, port.toInt, SecurityProtocol.valueOf(protocol))

http://git-wip-us.apache.org/repos/asf/kafka/blob/28ecea42/core/src/test/scala/unit/kafka/cluster/BrokerEndPointTest.scala
--
diff --git a/core/src/test/scala/unit/kafka/cluster/BrokerEndPointTest.scala 
b/core/src/test/scala/unit/kafka/cluster/BrokerEndPointTest.scala
index bb2506c..abe511f 100644
--- a/core/src/test/scala/unit/kafka/cluster/BrokerEndPointTest.scala
+++ b/core/src/test/scala/unit/kafka/cluster/BrokerEndPointTest.scala
@@ -120,5 +120,11 @@ class BrokerEndPointTest extends JUnit3Suite with Logging {
     assert(endpoint.host == "::1")
     assert(endpoint.port == 9092)
     assert(endpoint.connectionString == "PLAINTEXT://[::1]:9092")
+    // test hostname
+    connectionString = "PLAINTEXT://MyHostname:9092"
+    endpoint = EndPoint.createEndPoint(connectionString)
+    assert(endpoint.host == "MyHostname")
+    assert(endpoint.port == 9092)
+    assert(endpoint.connectionString == "PLAINTEXT://MyHostname:9092")
   }
 }



kafka git commit: kafka-2270; incorrect package name in unit tests; patched by Proneet Verma; reviewed by Jun Rao

2015-06-16 Thread junrao
Repository: kafka
Updated Branches:
  refs/heads/trunk 54e54f080 -> 395716ebf


kafka-2270; incorrect package name in unit tests; patched by Proneet Verma; 
reviewed by Jun Rao


Project: http://git-wip-us.apache.org/repos/asf/kafka/repo
Commit: http://git-wip-us.apache.org/repos/asf/kafka/commit/395716eb
Tree: http://git-wip-us.apache.org/repos/asf/kafka/tree/395716eb
Diff: http://git-wip-us.apache.org/repos/asf/kafka/diff/395716eb

Branch: refs/heads/trunk
Commit: 395716ebf0af2520a33ef352a62c10a71239bb4f
Parents: 54e54f0
Author: Proneet Verma pronee...@gmail.com
Authored: Tue Jun 16 15:03:40 2015 -0700
Committer: Jun Rao jun...@gmail.com
Committed: Tue Jun 16 15:03:40 2015 -0700

--
 core/src/test/scala/unit/kafka/KafkaConfigTest.scala  |  3 +--
 core/src/test/scala/unit/kafka/common/ConfigTest.scala|  3 +--
 core/src/test/scala/unit/kafka/common/TopicTest.scala |  3 +--
 .../scala/unit/kafka/consumer/PartitionAssignorTest.scala | 10 --
 .../scala/unit/kafka/integration/MinIsrConfigTest.scala   |  3 +--
 core/src/test/scala/unit/kafka/log/LogConfigTest.scala|  3 +--
 .../unit/kafka/server/KafkaConfigConfigDefTest.scala  |  3 +--
 .../unit/kafka/utils/ByteBoundedBlockingQueueTest.scala   |  5 ++---
 .../scala/unit/kafka/utils/CommandLineUtilsTest.scala |  5 ++---
 core/src/test/scala/unit/kafka/zk/ZKPathTest.scala|  4 +---
 10 files changed, 15 insertions(+), 27 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/kafka/blob/395716eb/core/src/test/scala/unit/kafka/KafkaConfigTest.scala
--
diff --git a/core/src/test/scala/unit/kafka/KafkaConfigTest.scala 
b/core/src/test/scala/unit/kafka/KafkaConfigTest.scala
index 4cb92de..4764c89 100644
--- a/core/src/test/scala/unit/kafka/KafkaConfigTest.scala
+++ b/core/src/test/scala/unit/kafka/KafkaConfigTest.scala
@@ -14,12 +14,11 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package unit.kafka
+package kafka
 
 import java.io.{FileOutputStream, File}
 import java.security.Permission
 
-import kafka.Kafka
 import kafka.server.KafkaConfig
 import org.junit.{After, Before, Test}
 import junit.framework.Assert._

http://git-wip-us.apache.org/repos/asf/kafka/blob/395716eb/core/src/test/scala/unit/kafka/common/ConfigTest.scala
--
diff --git a/core/src/test/scala/unit/kafka/common/ConfigTest.scala 
b/core/src/test/scala/unit/kafka/common/ConfigTest.scala
index 7bff96c..0aca938 100644
--- a/core/src/test/scala/unit/kafka/common/ConfigTest.scala
+++ b/core/src/test/scala/unit/kafka/common/ConfigTest.scala
@@ -15,12 +15,11 @@
  * limitations under the License.
  */
 
-package unit.kafka.common
+package kafka.common
 
 import junit.framework.Assert._
 import collection.mutable.ArrayBuffer
 import org.junit.Test
-import kafka.common.InvalidConfigException
 import kafka.producer.ProducerConfig
 import kafka.consumer.ConsumerConfig
 

http://git-wip-us.apache.org/repos/asf/kafka/blob/395716eb/core/src/test/scala/unit/kafka/common/TopicTest.scala
--
diff --git a/core/src/test/scala/unit/kafka/common/TopicTest.scala 
b/core/src/test/scala/unit/kafka/common/TopicTest.scala
index 0fb2588..79532c8 100644
--- a/core/src/test/scala/unit/kafka/common/TopicTest.scala
+++ b/core/src/test/scala/unit/kafka/common/TopicTest.scala
@@ -15,11 +15,10 @@
  * limitations under the License.
  */
 
-package unit.kafka.common
+package kafka.common
 
 import junit.framework.Assert._
 import collection.mutable.ArrayBuffer
-import kafka.common.{Topic, InvalidTopicException}
 import org.junit.Test
 
 class TopicTest {

http://git-wip-us.apache.org/repos/asf/kafka/blob/395716eb/core/src/test/scala/unit/kafka/consumer/PartitionAssignorTest.scala
--
diff --git 
a/core/src/test/scala/unit/kafka/consumer/PartitionAssignorTest.scala 
b/core/src/test/scala/unit/kafka/consumer/PartitionAssignorTest.scala
index 1910fcb..adf0801 100644
--- a/core/src/test/scala/unit/kafka/consumer/PartitionAssignorTest.scala
+++ b/core/src/test/scala/unit/kafka/consumer/PartitionAssignorTest.scala
@@ -15,20 +15,18 @@
  * limitations under the License.
  */
 
-package unit.kafka.consumer
+package kafka.consumer
 
 import org.scalatest.junit.JUnit3Suite
 import org.easymock.EasyMock
 import org.I0Itec.zkclient.ZkClient
 import org.apache.zookeeper.data.Stat
-import kafka.consumer._
 import kafka.utils.{TestUtils, Logging, ZkUtils, Json}
 import junit.framework.Assert._
 import kafka.common.TopicAndPartition
-import unit.kafka.consumer.PartitionAssignorTest.StaticSubscriptionInfo
-import kafka.consumer.ConsumerThreadId

kafka git commit: kafka-2262; LogSegmentSize validation should be consistent; patched by Manikumar Reddy; reviewed by Jun Rao

2015-06-16 Thread junrao
Repository: kafka
Updated Branches:
  refs/heads/trunk 478505632 -> 9650e12df


kafka-2262; LogSegmentSize validation should be consistent; patched by 
Manikumar Reddy; reviewed by Jun Rao


Project: http://git-wip-us.apache.org/repos/asf/kafka/repo
Commit: http://git-wip-us.apache.org/repos/asf/kafka/commit/9650e12d
Tree: http://git-wip-us.apache.org/repos/asf/kafka/tree/9650e12d
Diff: http://git-wip-us.apache.org/repos/asf/kafka/diff/9650e12d

Branch: refs/heads/trunk
Commit: 9650e12df2502f85ea665ae46a982d6fd0d2a954
Parents: 4785056
Author: Manikumar Reddy manikumar.re...@gmail.com
Authored: Tue Jun 16 15:50:17 2015 -0700
Committer: Jun Rao jun...@gmail.com
Committed: Tue Jun 16 15:50:17 2015 -0700

--
 core/src/main/scala/kafka/log/LogConfig.scala | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/kafka/blob/9650e12d/core/src/main/scala/kafka/log/LogConfig.scala
--
diff --git a/core/src/main/scala/kafka/log/LogConfig.scala 
b/core/src/main/scala/kafka/log/LogConfig.scala
index a907da0..f64fd79 100755
--- a/core/src/main/scala/kafka/log/LogConfig.scala
+++ b/core/src/main/scala/kafka/log/LogConfig.scala
@@ -22,6 +22,7 @@ import org.apache.kafka.common.utils.Utils
 import scala.collection._
 import org.apache.kafka.common.config.ConfigDef
 import kafka.message.BrokerCompressionCodec
+import kafka.message.Message
 
 object Defaults {
   val SegmentSize = 1024 * 1024
@@ -162,7 +163,7 @@ object LogConfig {
 import java.util.Arrays.asList
 
 new ConfigDef()
-      .define(SegmentBytesProp, INT, Defaults.SegmentSize, atLeast(0), MEDIUM, SegmentSizeDoc)
+      .define(SegmentBytesProp, INT, Defaults.SegmentSize, atLeast(Message.MinHeaderSize), MEDIUM, SegmentSizeDoc)
       .define(SegmentMsProp, LONG, Defaults.SegmentMs, atLeast(0), MEDIUM, SegmentMsDoc)
       .define(SegmentJitterMsProp, LONG, Defaults.SegmentJitterMs, atLeast(0), MEDIUM, SegmentJitterMsDoc)
       .define(SegmentIndexBytesProp, INT, Defaults.MaxIndexSize, atLeast(0), MEDIUM, MaxIndexSizeDoc)
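
Raising the lower bound to Message.MinHeaderSize means a segment must at least fit one message header; previously atLeast(0) admitted sizes on which appends could never succeed. A Scala sketch of the atLeast-style check; the 26-byte value is illustrative, real code should use kafka.message.Message.MinHeaderSize:

object SegmentBytesValidation {
  // Illustrative value; real code should use kafka.message.Message.MinHeaderSize.
  val MinHeaderSize = 26

  // The atLeast-style bound: reject a segment size too small to hold even
  // one message header, instead of accepting it and failing on append.
  def validateSegmentBytes(value: Int): Int =
    if (value < MinHeaderSize)
      throw new IllegalArgumentException(s"segment.bytes must be at least $MinHeaderSize, got $value")
    else value
}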



[1/2] kafka git commit: kafka-2264; SESSION_TIMEOUT_MS_CONFIG in ConsumerConfig should be int; patched by Manikumar Reddy; reviewed by Jun Rao

2015-06-16 Thread junrao
Repository: kafka
Updated Branches:
  refs/heads/trunk 28ecea421 -> 478505632


kafka-2264; SESSION_TIMEOUT_MS_CONFIG in ConsumerConfig should be int; patched 
by Manikumar Reddy; reviewed by Jun Rao


Project: http://git-wip-us.apache.org/repos/asf/kafka/repo
Commit: http://git-wip-us.apache.org/repos/asf/kafka/commit/7009f1d6
Tree: http://git-wip-us.apache.org/repos/asf/kafka/tree/7009f1d6
Diff: http://git-wip-us.apache.org/repos/asf/kafka/diff/7009f1d6

Branch: refs/heads/trunk
Commit: 7009f1d6fffe3866723d1d33a28a4572053eb4e5
Parents: 28ecea4
Author: Manikumar Reddy manikumar.re...@gmail.com
Authored: Tue Jun 16 15:30:52 2015 -0700
Committer: Jun Rao jun...@gmail.com
Committed: Tue Jun 16 15:30:52 2015 -0700

--
 .../java/org/apache/kafka/clients/consumer/ConsumerConfig.java | 2 +-
 .../java/org/apache/kafka/clients/consumer/KafkaConsumer.java  | 2 +-
 .../apache/kafka/clients/consumer/internals/Coordinator.java   | 6 +++---
 .../kafka/clients/consumer/internals/CoordinatorTest.java  | 2 +-
 4 files changed, 6 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/kafka/blob/7009f1d6/clients/src/main/java/org/apache/kafka/clients/consumer/ConsumerConfig.java
--
diff --git 
a/clients/src/main/java/org/apache/kafka/clients/consumer/ConsumerConfig.java 
b/clients/src/main/java/org/apache/kafka/clients/consumer/ConsumerConfig.java
index 1e90524..daff34d 100644
--- 
a/clients/src/main/java/org/apache/kafka/clients/consumer/ConsumerConfig.java
+++ 
b/clients/src/main/java/org/apache/kafka/clients/consumer/ConsumerConfig.java
@@ -167,7 +167,7 @@ public class ConsumerConfig extends AbstractConfig {
 
                                        CommonClientConfigs.BOOSTRAP_SERVERS_DOC)
                                 .define(GROUP_ID_CONFIG, Type.STRING, "", Importance.HIGH, GROUP_ID_DOC)
                                 .define(SESSION_TIMEOUT_MS_CONFIG,
-                                        Type.LONG,
+                                        Type.INT,
                                         30000,
                                         Importance.HIGH,
                                         SESSION_TIMEOUT_MS_DOC)

http://git-wip-us.apache.org/repos/asf/kafka/blob/7009f1d6/clients/src/main/java/org/apache/kafka/clients/consumer/KafkaConsumer.java
--
diff --git 
a/clients/src/main/java/org/apache/kafka/clients/consumer/KafkaConsumer.java 
b/clients/src/main/java/org/apache/kafka/clients/consumer/KafkaConsumer.java
index d1d1ec1..951c34c 100644
--- a/clients/src/main/java/org/apache/kafka/clients/consumer/KafkaConsumer.java
+++ b/clients/src/main/java/org/apache/kafka/clients/consumer/KafkaConsumer.java
@@ -484,7 +484,7 @@ public class KafkaConsumerK, V implements ConsumerK, V {
 this.coordinator = new Coordinator(this.client,
 config.getString(ConsumerConfig.GROUP_ID_CONFIG),
 this.retryBackoffMs,
-config.getLong(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG),
+config.getInt(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG),
 
config.getString(ConsumerConfig.PARTITION_ASSIGNMENT_STRATEGY_CONFIG),
 this.metadata,
 this.subscriptions,

http://git-wip-us.apache.org/repos/asf/kafka/blob/7009f1d6/clients/src/main/java/org/apache/kafka/clients/consumer/internals/Coordinator.java
--
diff --git 
a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/Coordinator.java
 
b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/Coordinator.java
index c1496a0..41cb945 100644
--- 
a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/Coordinator.java
+++ 
b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/Coordinator.java
@@ -69,7 +69,7 @@ public final class Coordinator {
 private final String groupId;
 private final Metadata metadata;
 private final Heartbeat heartbeat;
-private final long sessionTimeoutMs;
+private final int sessionTimeoutMs;
 private final String assignmentStrategy;
 private final SubscriptionState subscriptions;
 private final CoordinatorMetrics sensors;
@@ -84,7 +84,7 @@ public final class Coordinator {
 public Coordinator(KafkaClient client,
String groupId,
long retryBackoffMs,
-   long sessionTimeoutMs,
+   int sessionTimeoutMs,
String assignmentStrategy,
Metadata metadata,
SubscriptionState subscriptions,
@@ -123,7 +123,7 @@ public final class Coordinator {
 

[2/2] kafka git commit: kafka-2252; Socket connection closing is logged, but not corresponding opening of socket; patched by Gwen Shapira; reviewed by Jun Rao

2015-06-16 Thread junrao
kafka-2252; Socket connection closing is logged, but not corresponding opening 
of socket; patched by Gwen Shapira; reviewed by Jun Rao


Project: http://git-wip-us.apache.org/repos/asf/kafka/repo
Commit: http://git-wip-us.apache.org/repos/asf/kafka/commit/47850563
Tree: http://git-wip-us.apache.org/repos/asf/kafka/tree/47850563
Diff: http://git-wip-us.apache.org/repos/asf/kafka/diff/47850563

Branch: refs/heads/trunk
Commit: 478505632edc8f4b51e4ed561d1adf455256c3e4
Parents: 7009f1d
Author: Gwen Shapira csh...@gmail.com
Authored: Tue Jun 16 15:37:58 2015 -0700
Committer: Jun Rao jun...@gmail.com
Committed: Tue Jun 16 15:37:58 2015 -0700

--
 .../src/main/java/org/apache/kafka/common/network/Selector.java   | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/kafka/blob/47850563/clients/src/main/java/org/apache/kafka/common/network/Selector.java
--
diff --git 
a/clients/src/main/java/org/apache/kafka/common/network/Selector.java 
b/clients/src/main/java/org/apache/kafka/common/network/Selector.java
index 1da215b..4aee214 100644
--- a/clients/src/main/java/org/apache/kafka/common/network/Selector.java
+++ b/clients/src/main/java/org/apache/kafka/common/network/Selector.java
@@ -267,6 +267,7 @@ public class Selector implements Selectable {
                 key.interestOps(key.interestOps() & ~SelectionKey.OP_CONNECT | SelectionKey.OP_READ);
                 this.connected.add(transmissions.id);
                 this.sensors.connectionCreated.record();
+                log.debug("Connection {} created", transmissions.id);
             }
 
 /* read from any connections that have readable data */
@@ -307,7 +308,7 @@ public class Selector implements Selectable {
             } catch (IOException e) {
                 String desc = socketDescription(channel);
                 if (e instanceof EOFException || e instanceof ConnectException)
-                    log.info("Connection {} disconnected", desc);
+                    log.debug("Connection {} disconnected", desc);
                 else
                     log.warn("Error in I/O with connection to {}", desc, e);
                 close(transmissions.id);



kafka git commit: kafka-2232; make MockProducer generic; patched by Alexander Pakulov; reviewed by Jun Rao

2015-06-12 Thread junrao
Repository: kafka
Updated Branches:
  refs/heads/trunk 017c00caf -> d31a2c238


kafka-2232; make MockProducer generic; patched by Alexander Pakulov; reviewed 
by Jun Rao


Project: http://git-wip-us.apache.org/repos/asf/kafka/repo
Commit: http://git-wip-us.apache.org/repos/asf/kafka/commit/d31a2c23
Tree: http://git-wip-us.apache.org/repos/asf/kafka/tree/d31a2c23
Diff: http://git-wip-us.apache.org/repos/asf/kafka/diff/d31a2c23

Branch: refs/heads/trunk
Commit: d31a2c2381bebc9c4b27e36fdf986183732e13eb
Parents: 017c00c
Author: Alexander Pakulov a.paku...@gmail.com
Authored: Fri Jun 12 14:16:03 2015 -0700
Committer: Jun Rao jun...@gmail.com
Committed: Fri Jun 12 14:16:03 2015 -0700

--
 .../kafka/clients/producer/MockProducer.java| 53 
 .../clients/producer/MockProducerTest.java  | 31 ++--
 .../org/apache/kafka/test/MockSerializer.java   |  1 -
 3 files changed, 58 insertions(+), 27 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/kafka/blob/d31a2c23/clients/src/main/java/org/apache/kafka/clients/producer/MockProducer.java
--
diff --git 
a/clients/src/main/java/org/apache/kafka/clients/producer/MockProducer.java 
b/clients/src/main/java/org/apache/kafka/clients/producer/MockProducer.java
index e66491c..36e7ffa 100644
--- a/clients/src/main/java/org/apache/kafka/clients/producer/MockProducer.java
+++ b/clients/src/main/java/org/apache/kafka/clients/producer/MockProducer.java
@@ -30,6 +30,7 @@ import 
org.apache.kafka.clients.producer.internals.FutureRecordMetadata;
 import org.apache.kafka.clients.producer.internals.DefaultPartitioner;
 import org.apache.kafka.clients.producer.internals.ProduceRequestResult;
 import org.apache.kafka.common.*;
+import org.apache.kafka.common.serialization.Serializer;
 
 
 /**
@@ -38,14 +39,16 @@ import org.apache.kafka.common.*;
  * By default this mock will synchronously complete each send call successfully. However it can be configured to allow
  * the user to control the completion of the call and supply an optional error for the producer to throw.
  */
-public class MockProducer implements Producer<byte[], byte[]> {
+public class MockProducer<K, V> implements Producer<K, V> {
 
     private final Cluster cluster;
-    private final Partitioner partitioner = new DefaultPartitioner();
-    private final List<ProducerRecord<byte[], byte[]>> sent;
+    private final Partitioner partitioner;
+    private final List<ProducerRecord<K, V>> sent;
     private final Deque<Completion> completions;
     private boolean autoComplete;
     private Map<TopicPartition, Long> offsets;
+    private final Serializer<K> keySerializer;
+    private final Serializer<V> valueSerializer;
 
 /**
  * Create a mock producer
@@ -55,31 +58,37 @@ public class MockProducer implements Producer<byte[], byte[]> {
      *        the user must call {@link #completeNext()} or {@link #errorNext(RuntimeException)} after
      *        {@link #send(ProducerRecord) send()} to complete the call and unblock the {@link
      *        java.util.concurrent.Future Future&lt;RecordMetadata&gt;} that is returned.
+     * @param partitioner The partition strategy
+     * @param keySerializer The serializer for key that implements {@link Serializer}.
+     * @param valueSerializer The serializer for value that implements {@link Serializer}.
      */
-    public MockProducer(Cluster cluster, boolean autoComplete) {
+    public MockProducer(Cluster cluster, boolean autoComplete, Partitioner partitioner, Serializer<K> keySerializer, Serializer<V> valueSerializer) {
         this.cluster = cluster;
         this.autoComplete = autoComplete;
+        this.partitioner = partitioner;
+        this.keySerializer = keySerializer;
+        this.valueSerializer = valueSerializer;
         this.offsets = new HashMap<TopicPartition, Long>();
-        this.sent = new ArrayList<ProducerRecord<byte[], byte[]>>();
+        this.sent = new ArrayList<ProducerRecord<K, V>>();
         this.completions = new ArrayDeque<Completion>();
     }
 
 /**
-     * Create a new mock producer with invented metadata the given autoComplete setting.
+     * Create a new mock producer with invented metadata the given autoComplete setting and key/value serializers
      *
-     * Equivalent to {@link #MockProducer(Cluster, boolean) new MockProducer(null, autoComplete)}
+     * Equivalent to {@link #MockProducer(Cluster, boolean, Partitioner, Serializer, Serializer)} new MockProducer(Cluster.empty(), autoComplete, new DefaultPartitioner(), keySerializer, valueSerializer)}
      */
-    public MockProducer(boolean autoComplete) {
-        this(Cluster.empty(), autoComplete);
+    public MockProducer(boolean autoComplete, Serializer<K> keySerializer, Serializer<V> valueSerializer) {
+        this(Cluster.empty(), autoComplete, new 
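
With the class made generic, tests can type the mock to their application key/value types instead of raw byte arrays. A hedged usage sketch against the new (autoComplete, keySerializer, valueSerializer) constructor; the topic and values are illustrative, and history() is the mock's accessor for captured sends:

import org.apache.kafka.clients.producer.{MockProducer, ProducerRecord}
import org.apache.kafka.common.serialization.StringSerializer

object MockProducerUsage {
  def main(args: Array[String]): Unit = {
    // autoComplete = true: each send() completes synchronously and successfully.
    val producer = new MockProducer[String, String](true, new StringSerializer(), new StringSerializer())
    producer.send(new ProducerRecord[String, String]("my-topic", "key", "value"))
    println(producer.history()) // the record captured by the mock
  }
}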
