kafka git commit: KAFKA-2415; Fix transient failure in LogRecoveryTest

2015-08-07 Thread gwenshap
Repository: kafka
Updated Branches:
  refs/heads/trunk a36befd81 -> 68ad80f85


KAFKA-2415; Fix transient failure in LogRecoveryTest

Author: Jiangjie Qin becket@gmail.com
Author: Jiangjie Qin j...@jqin-ld1.linkedin.biz

Reviewers: Ismael Juma, Gwen Shapira

Closes #121 from becketqin/KAFKA-2415 and squashes the following commits:

7a9f453 [Jiangjie Qin] Addressed Ismael's comment
346103c [Jiangjie Qin] KAFKA-2415: Fix transient failure in LogRecoveryTest


Project: http://git-wip-us.apache.org/repos/asf/kafka/repo
Commit: http://git-wip-us.apache.org/repos/asf/kafka/commit/68ad80f8
Tree: http://git-wip-us.apache.org/repos/asf/kafka/tree/68ad80f8
Diff: http://git-wip-us.apache.org/repos/asf/kafka/diff/68ad80f8

Branch: refs/heads/trunk
Commit: 68ad80f8524931222d78a8125cd012321d784337
Parents: a36befd
Author: Jiangjie Qin becket@gmail.com
Authored: Fri Aug 7 16:16:34 2015 -0700
Committer: Gwen Shapira csh...@gmail.com
Committed: Fri Aug 7 16:16:34 2015 -0700

--
 core/src/test/scala/unit/kafka/server/LogRecoveryTest.scala | 3 +++
 1 file changed, 3 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/kafka/blob/68ad80f8/core/src/test/scala/unit/kafka/server/LogRecoveryTest.scala
--
diff --git a/core/src/test/scala/unit/kafka/server/LogRecoveryTest.scala 
b/core/src/test/scala/unit/kafka/server/LogRecoveryTest.scala
index 7688f26..21081ce 100755
--- a/core/src/test/scala/unit/kafka/server/LogRecoveryTest.scala
+++ b/core/src/test/scala/unit/kafka/server/LogRecoveryTest.scala
@@ -210,6 +210,9 @@ class LogRecoveryTest extends JUnit3Suite with 
ZooKeeperTestHarness {
 sendMessages(2)
 hw += 2
 
+// allow some time for the follower to create replica
+TestUtils.waitUntilTrue(() => server1.replicaManager.getReplica(topic, 0).nonEmpty,
+  "Failed to create replica in follower after timeout")
 // allow some time for the follower to get the leader HW
 TestUtils.waitUntilTrue(() =>
   server1.replicaManager.getReplica(topic, 0).get.highWatermark.messageOffset == hw,
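
The pattern behind the fix is plain bounded polling: block until the follower's replica object exists before asserting on its high watermark, instead of racing replica creation. An illustrative Java sketch of such a helper, assuming a simple deadline loop (the real TestUtils.waitUntilTrue is Scala and takes a condition plus a failure message, as above):

    import java.util.function.BooleanSupplier;

    public final class WaitUtil {
        // Poll `condition` until it holds or `timeoutMs` elapses; fail with `msg`.
        // Illustrative stand-in for the Scala TestUtils.waitUntilTrue used above.
        public static void waitUntilTrue(BooleanSupplier condition, String msg, long timeoutMs)
                throws InterruptedException {
            long deadline = System.currentTimeMillis() + timeoutMs;
            while (!condition.getAsBoolean()) {
                if (System.currentTimeMillis() > deadline)
                    throw new AssertionError(msg);
                Thread.sleep(100L); // back off briefly between checks
            }
        }
    }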



kafka git commit: Minor: Fixes to Selector's javadoc

2015-08-07 Thread gwenshap
Repository: kafka
Updated Branches:
  refs/heads/trunk 63b820c59 -> 47c99f387


Minor: Fixes to Selector's javadoc

Author: Ismael Juma ism...@juma.me.uk

Closes #126 from ijuma/minor-selector-javadoc-fixes and squashes the following 
commits:

a26f529 [Ismael Juma] Minor fixes to Selector's javadoc


Project: http://git-wip-us.apache.org/repos/asf/kafka/repo
Commit: http://git-wip-us.apache.org/repos/asf/kafka/commit/47c99f38
Tree: http://git-wip-us.apache.org/repos/asf/kafka/tree/47c99f38
Diff: http://git-wip-us.apache.org/repos/asf/kafka/diff/47c99f38

Branch: refs/heads/trunk
Commit: 47c99f38726dd5bd3bf2c2f8a8a999d36b53ffcf
Parents: 63b820c
Author: Ismael Juma ism...@juma.me.uk
Authored: Fri Aug 7 16:07:45 2015 -0700
Committer: Gwen Shapira csh...@gmail.com
Committed: Fri Aug 7 16:07:45 2015 -0700

--
 .../org/apache/kafka/common/network/Selector.java| 15 ++++++++-------
 1 file changed, 8 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/kafka/blob/47c99f38/clients/src/main/java/org/apache/kafka/common/network/Selector.java
--
diff --git 
a/clients/src/main/java/org/apache/kafka/common/network/Selector.java 
b/clients/src/main/java/org/apache/kafka/common/network/Selector.java
index aaf60c9..ce20111 100644
--- a/clients/src/main/java/org/apache/kafka/common/network/Selector.java
+++ b/clients/src/main/java/org/apache/kafka/common/network/Selector.java
@@ -45,7 +45,7 @@ import org.slf4j.LoggerFactory;
  * A connection can be added to the nioSelector associated with an integer id by doing
  * 
  * <pre>
 - * nioSelector.connect(42, new InetSocketAddress(&quot;google.com&quot;, server.port), 64000, 64000);
 + * nioSelector.connect(&quot;42&quot;, new InetSocketAddress(&quot;google.com&quot;, server.port), 64000, 64000);
  * </pre>
  * 
  * The connect call does not block on the creation of the TCP connection, so the connect method only begins initiating
@@ -55,8 +55,9 @@ import org.slf4j.LoggerFactory;
  * connections are all done using the <code>poll()</code> call.
  * 
  * <pre>
 - * List&lt;NetworkRequest&gt; requestsToSend = Arrays.asList(new NetworkRequest(0, myBytes), new NetworkRequest(1, myOtherBytes));
 - * nioSelector.poll(TIMEOUT_MS, requestsToSend);
 + * nioSelector.send(new NetworkSend(myDestination, myBytes));
 + * nioSelector.send(new NetworkSend(myOtherDestination, myOtherBytes));
 + * nioSelector.poll(TIMEOUT_MS);
  * </pre>
  * 
  * The nioSelector maintains several lists that are reset by each call to <code>poll()</code> which are available via
@@ -123,7 +124,7 @@ public class Selector implements Selectable {
  * Begin connecting to the given address and add the connection to this nioSelector associated with the given id
  * number.
  * <p>
 - * Note that this call only initiates the connection, which will be completed on a future {@link #poll(long, List)}
 + * Note that this call only initiates the connection, which will be completed on a future {@link #poll(long)}
  * call. Check {@link #connected()} to see which (if any) connections have completed after a given poll call.
  * @param id The id for the new connection
  * @param address The address to connect to
@@ -171,7 +172,7 @@ public class Selector implements Selectable {
 
 /**
  * Disconnect any connections for the given id (if there are any). The 
disconnection is asynchronous and will not be
- * processed until the next {@link #poll(long, List) poll()} call.
+ * processed until the next {@link #poll(long) poll()} call.
  */
 @Override
 public void disconnect(String id) {
@@ -228,8 +229,8 @@ public class Selector implements Selectable {
  * 
  * When this call is completed the user can check for completed sends, 
receives, connections or disconnects using
  * {@link #completedSends()}, {@link #completedReceives()}, {@link 
#connected()}, {@link #disconnected()}. These
- * lists will be cleared at the beginning of each {@link #poll(long, 
List)} call and repopulated by the call if any
- * completed I/O.
+ * lists will be cleared at the beginning of each {@link #poll(long)} call 
and repopulated by the call if there is
+ * any completed I/O.
  * 
  * @param timeout The amount of time to wait, in milliseconds. If 
negative, wait indefinitely.
  * @throws IllegalStateException If a send is given for which we have no 
existing connection or for which there is
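
Putting the corrected javadoc together, the intended call sequence is connect, send, poll, then inspect the completed-I/O lists. A hedged sketch of that cycle (the address, port, payload, and timeout are illustrative placeholders, not values from the commit):

    import java.net.InetSocketAddress;
    import java.nio.ByteBuffer;
    import org.apache.kafka.common.network.NetworkSend;
    import org.apache.kafka.common.network.Selector;

    public final class SelectorUsageSketch {
        static final long TIMEOUT_MS = 1000L; // illustrative timeout

        // The selector instance, destination id, and payload come from the caller.
        static void sendOnce(Selector nioSelector, String id, ByteBuffer payload) throws Exception {
            // Non-blocking: only initiates the connection; completion shows up in connected().
            nioSelector.connect(id, new InetSocketAddress("google.com", 9092), 64000, 64000);
            // Queue a send for the destination id; poll() performs the actual I/O.
            nioSelector.send(new NetworkSend(id, payload));
            nioSelector.poll(TIMEOUT_MS);
            // Each poll() clears and repopulates these result lists.
            System.out.println("completed sends: " + nioSelector.completedSends().size());
            System.out.println("disconnected ids: " + nioSelector.disconnected());
        }
    }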



kafka git commit: KAFKA-2429: Add annotations to mark classes as stable/unstable

2015-08-12 Thread gwenshap
Repository: kafka
Updated Branches:
  refs/heads/trunk 3902dc024 -> 04b0d870b


KAFKA-2429: Add annotations to mark classes as stable/unstable

This also marks the consumer as unstable to show an example of using these 
annotations.

Author: Ewen Cheslack-Postava m...@ewencp.org

Reviewers: Gwen Shapira

Closes #133 from ewencp/stability-annotations and squashes the following 
commits:

09c15c3 [Ewen Cheslack-Postava] KAFKA-2429: Add annotations to mark classes as 
stable/unstable


Project: http://git-wip-us.apache.org/repos/asf/kafka/repo
Commit: http://git-wip-us.apache.org/repos/asf/kafka/commit/04b0d870
Tree: http://git-wip-us.apache.org/repos/asf/kafka/tree/04b0d870
Diff: http://git-wip-us.apache.org/repos/asf/kafka/diff/04b0d870

Branch: refs/heads/trunk
Commit: 04b0d870b263117e301584bfc00dd8e81486617a
Parents: 3902dc0
Author: Ewen Cheslack-Postava m...@ewencp.org
Authored: Wed Aug 12 14:57:42 2015 -0700
Committer: Gwen Shapira csh...@gmail.com
Committed: Wed Aug 12 14:57:42 2015 -0700

--
 .../apache/kafka/clients/consumer/Consumer.java |  2 +
 .../kafka/clients/consumer/KafkaConsumer.java   |  2 +
 .../common/annotation/InterfaceStability.java   | 48 ++++++++++++++++++++++++++++++++++++++++++++++++
 3 files changed, 52 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/kafka/blob/04b0d870/clients/src/main/java/org/apache/kafka/clients/consumer/Consumer.java
--
diff --git 
a/clients/src/main/java/org/apache/kafka/clients/consumer/Consumer.java 
b/clients/src/main/java/org/apache/kafka/clients/consumer/Consumer.java
index 158e1ea..76834ad 100644
--- a/clients/src/main/java/org/apache/kafka/clients/consumer/Consumer.java
+++ b/clients/src/main/java/org/apache/kafka/clients/consumer/Consumer.java
@@ -21,11 +21,13 @@ import org.apache.kafka.common.Metric;
 import org.apache.kafka.common.PartitionInfo;
 import org.apache.kafka.common.TopicPartition;
 import org.apache.kafka.common.MetricName;
+import org.apache.kafka.common.annotation.InterfaceStability;
 
 /**
  * @see KafkaConsumer
  * @see MockConsumer
  */
+@InterfaceStability.Unstable
 public interface Consumer<K, V> extends Closeable {
 
 /**

http://git-wip-us.apache.org/repos/asf/kafka/blob/04b0d870/clients/src/main/java/org/apache/kafka/clients/consumer/KafkaConsumer.java
--
diff --git 
a/clients/src/main/java/org/apache/kafka/clients/consumer/KafkaConsumer.java 
b/clients/src/main/java/org/apache/kafka/clients/consumer/KafkaConsumer.java
index ed99e9b..be46b6c 100644
--- a/clients/src/main/java/org/apache/kafka/clients/consumer/KafkaConsumer.java
+++ b/clients/src/main/java/org/apache/kafka/clients/consumer/KafkaConsumer.java
@@ -26,6 +26,7 @@ import org.apache.kafka.common.Metric;
 import org.apache.kafka.common.MetricName;
 import org.apache.kafka.common.PartitionInfo;
 import org.apache.kafka.common.TopicPartition;
+import org.apache.kafka.common.annotation.InterfaceStability;
 import org.apache.kafka.common.metrics.JmxReporter;
 import org.apache.kafka.common.metrics.MetricConfig;
 import org.apache.kafka.common.metrics.Metrics;
@@ -390,6 +391,7 @@ import static org.apache.kafka.common.utils.Utils.min;
  * commit.
  * 
  */
+@InterfaceStability.Unstable
 public class KafkaConsumer<K, V> implements Consumer<K, V> {
 
 private static final Logger log = 
LoggerFactory.getLogger(KafkaConsumer.class);

http://git-wip-us.apache.org/repos/asf/kafka/blob/04b0d870/clients/src/main/java/org/apache/kafka/common/annotation/InterfaceStability.java
--
diff --git 
a/clients/src/main/java/org/apache/kafka/common/annotation/InterfaceStability.java
 
b/clients/src/main/java/org/apache/kafka/common/annotation/InterfaceStability.java
new file mode 100644
index 000..0d38f56
--- /dev/null
+++ 
b/clients/src/main/java/org/apache/kafka/common/annotation/InterfaceStability.java
@@ -0,0 +1,48 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more 
contributor license agreements. See the NOTICE
+ * file distributed with this work for additional information regarding 
copyright ownership. The ASF licenses this file
+ * to You under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the
+ * License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software 
distributed under the License is distributed on
+ * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations under the License.
+ */
+package org.apache.kafka.common.annotation;
+
+import 
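
The new file is cut off here by the archive. For orientation, a stability-annotation holder in this Hadoop-inspired style is typically just a set of nested marker annotations; a hedged sketch (illustrative, not the exact truncated contents):

    package org.apache.kafka.common.annotation;

    import java.lang.annotation.Documented;

    // Sketch only: annotate public classes/interfaces to signal how stable
    // their API is for compatibility purposes.
    public class InterfaceStability {
        /** Compatibility is maintained across releases. */
        @Documented
        public @interface Stable { }

        /** Compatibility may be broken at minor releases. */
        @Documented
        public @interface Evolving { }

        /** No guarantee is provided; the API may change at any time. */
        @Documented
        public @interface Unstable { }
    }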

kafka git commit: KAFKA-2436; log.retention.hours should be honored by LogManager

2015-08-18 Thread gwenshap
Repository: kafka
Updated Branches:
  refs/heads/trunk 786867c2e -> 503bd3664


KAFKA-2436; log.retention.hours should be honored by LogManager

Author: Dong Lin lindon...@gmail.com

Reviewers: Joel Koshy, Gwen Shapira

Closes #142 from lindong28/KAFKA-2436


Project: http://git-wip-us.apache.org/repos/asf/kafka/repo
Commit: http://git-wip-us.apache.org/repos/asf/kafka/commit/503bd366
Tree: http://git-wip-us.apache.org/repos/asf/kafka/tree/503bd366
Diff: http://git-wip-us.apache.org/repos/asf/kafka/diff/503bd366

Branch: refs/heads/trunk
Commit: 503bd36647695e8cc91893ffb80346dd03eb0bc5
Parents: 786867c
Author: Dong Lin lindon...@gmail.com
Authored: Tue Aug 18 13:03:11 2015 -0700
Committer: Gwen Shapira csh...@gmail.com
Committed: Tue Aug 18 13:03:11 2015 -0700

--
 core/src/main/scala/kafka/log/LogConfig.scala   |  5 +-
 .../main/scala/kafka/server/KafkaConfig.scala   |  8 +--
 .../main/scala/kafka/server/KafkaServer.scala   | 65 ++--
 .../scala/unit/kafka/log/LogConfigTest.scala| 19 ++
 4 files changed, 58 insertions(+), 39 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/kafka/blob/503bd366/core/src/main/scala/kafka/log/LogConfig.scala
--
diff --git a/core/src/main/scala/kafka/log/LogConfig.scala 
b/core/src/main/scala/kafka/log/LogConfig.scala
index c969d16..7fc7e33 100755
--- a/core/src/main/scala/kafka/log/LogConfig.scala
+++ b/core/src/main/scala/kafka/log/LogConfig.scala
@@ -47,7 +47,10 @@ object Defaults {
 }
 
 case class LogConfig(props: java.util.Map[_, _]) extends 
AbstractConfig(LogConfig.configDef, props, false) {
-
+  /**
+   * Important note: Any configuration parameter that is passed along from 
KafkaConfig to LogConfig
+   * should also go in copyKafkaConfigToLog.
+   */
   val segmentSize = getInt(LogConfig.SegmentBytesProp)
   val segmentMs = getLong(LogConfig.SegmentMsProp)
   val segmentJitterMs = getLong(LogConfig.SegmentJitterMsProp)

http://git-wip-us.apache.org/repos/asf/kafka/blob/503bd366/core/src/main/scala/kafka/server/KafkaConfig.scala
--
diff --git a/core/src/main/scala/kafka/server/KafkaConfig.scala 
b/core/src/main/scala/kafka/server/KafkaConfig.scala
index 394f21b..c39402c 100755
--- a/core/src/main/scala/kafka/server/KafkaConfig.scala
+++ b/core/src/main/scala/kafka/server/KafkaConfig.scala
@@ -335,7 +335,7 @@ object KafkaConfig {
   val LogRetentionTimeHoursDoc = "The number of hours to keep a log file before deleting it (in hours), tertiary to " + LogRetentionTimeMillisProp + " property"
 
   val LogRetentionBytesDoc = "The maximum size of the log before deleting it"
-  val LogCleanupIntervalMsDoc = "The frequency in minutes that the log cleaner checks whether any log is eligible for deletion"
+  val LogCleanupIntervalMsDoc = "The frequency in milliseconds that the log cleaner checks whether any log is eligible for deletion"
   val LogCleanupPolicyDoc = "The default cleanup policy for segments beyond the retention window, must be either \"delete\" or \"compact\""
   val LogCleanerThreadsDoc = "The number of background threads to use for log cleaning"
   val LogCleanerIoMaxBytesPerSecondDoc = "The log cleaner will be throttled so that the sum of its read and write i/o will be less than this value on average"
@@ -652,8 +652,9 @@ case class KafkaConfig (props: java.util.Map[_, _]) extends 
AbstractConfig(Kafka
   val logRollTimeMillis: java.lang.Long = 
Option(getLong(KafkaConfig.LogRollTimeMillisProp)).getOrElse(60 * 60 * 1000L * 
getInt(KafkaConfig.LogRollTimeHoursProp))
   val logRollTimeJitterMillis: java.lang.Long = 
Option(getLong(KafkaConfig.LogRollTimeJitterMillisProp)).getOrElse(60 * 60 * 
1000L * getInt(KafkaConfig.LogRollTimeJitterHoursProp))
   val logFlushIntervalMs: java.lang.Long = 
Option(getLong(KafkaConfig.LogFlushIntervalMsProp)).getOrElse(getLong(KafkaConfig.LogFlushSchedulerIntervalMsProp))
+  val logRetentionTimeMillis = getLogRetentionTimeMillis
   val minInSyncReplicas = getInt(KafkaConfig.MinInSyncReplicasProp)
-  val logPreAllocateEnable: Boolean = 
getBoolean(KafkaConfig.LogPreAllocateProp)
+  val logPreAllocateEnable: java.lang.Boolean = 
getBoolean(KafkaConfig.LogPreAllocateProp)
 
   /** * Replication configuration ***/
   val controllerSocketTimeoutMs: Int = 
getInt(KafkaConfig.ControllerSocketTimeoutMsProp)
@@ -672,7 +673,7 @@ case class KafkaConfig (props: java.util.Map[_, _]) extends 
AbstractConfig(Kafka
   val autoLeaderRebalanceEnable = 
getBoolean(KafkaConfig.AutoLeaderRebalanceEnableProp)
   val leaderImbalancePerBrokerPercentage = 
getInt(KafkaConfig.LeaderImbalancePerBrokerPercentageProp)
   val leaderImbalanceCheckIntervalSeconds = 
getLong(KafkaConfig.LeaderImbalanceCheckIntervalSecondsProp)
-  val uncleanLeaderElectionEnable = 
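
The message is truncated above, but the shape of the fix is visible: KafkaConfig now resolves logRetentionTimeMillis once, so LogManager honors the documented precedence in which an explicit ms value wins, minutes comes next, and log.retention.hours is the tertiary fallback. A hedged Java sketch of that resolution (parameter names are hypothetical):

    // Illustrative precedence for log retention: ms beats minutes beats hours.
    static long resolveRetentionMs(Long retentionMs, Integer retentionMinutes, Integer retentionHours) {
        if (retentionMs != null)
            return retentionMs;                          // log.retention.ms
        if (retentionMinutes != null)
            return retentionMinutes * 60L * 1000L;       // log.retention.minutes
        return retentionHours * 60L * 60L * 1000L;       // log.retention.hours (tertiary)
    }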

kafka git commit: KAFKA-2330: Vagrantfile sets global configs instead of per-provider override configs; patched by Ewen Cheslack-Postava, reviewed by Geoff Anderson and Gwen Shapira

2015-08-19 Thread gwenshap
Repository: kafka
Updated Branches:
  refs/heads/trunk 0b04f9f34 -> e4fc456ab


KAFKA-2330: Vagrantfile sets global configs instead of per-provider override 
configs; patched by Ewen Cheslack-Postava, reviewed by Geoff Anderson and Gwen 
Shapira


Project: http://git-wip-us.apache.org/repos/asf/kafka/repo
Commit: http://git-wip-us.apache.org/repos/asf/kafka/commit/e4fc456a
Tree: http://git-wip-us.apache.org/repos/asf/kafka/tree/e4fc456a
Diff: http://git-wip-us.apache.org/repos/asf/kafka/diff/e4fc456a

Branch: refs/heads/trunk
Commit: e4fc456ab06a228c4228cbae58d391de1b768f9e
Parents: 0b04f9f
Author: Ewen Cheslack-Postava m...@ewencp.org
Authored: Wed Aug 19 18:20:13 2015 -0700
Committer: Gwen Shapira csh...@gmail.com
Committed: Wed Aug 19 18:20:51 2015 -0700

--
 Vagrantfile   | 6 +++---
 vagrant/README.md | 4 ++--
 2 files changed, 5 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/kafka/blob/e4fc456a/Vagrantfile
--
diff --git a/Vagrantfile b/Vagrantfile
index 28bf24a..31b99b4 100644
--- a/Vagrantfile
+++ b/Vagrantfile
@@ -87,14 +87,14 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
 vb.customize ["modifyvm", :id, "--memory", ram_megabytes.to_s]
 
 if Vagrant.has_plugin?("vagrant-cachier")
-  config.cache.scope = :box
+  override.cache.scope = :box
   # Besides the defaults, we use a custom cache to handle the Oracle JDK
   # download, which downloads via wget during an apt install. Because of 
the
   # way the installer ends up using its cache directory, we need to jump
   # through some hoops instead of just specifying a cache directly -- we
   # share to a temporary location and the provisioning scripts symlink data
   # to the right location.
-  config.cache.enable :generic, {
+  override.cache.enable :generic, {
     "oracle-jdk7" => { cache_dir: "/tmp/oracle-jdk7-installer-cache" },
   }
 end
@@ -148,7 +148,7 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
 end
 
 # Exclude some directories that can grow very large from syncing
-    config.vm.synced_folder ".", "/vagrant", type: "rsync", :rsync_excludes => ['.git', 'core/data/', 'logs/', 'system_test/']
+    override.vm.synced_folder ".", "/vagrant", type: "rsync", :rsync_excludes => ['.git', 'core/data/', 'logs/', 'system_test/', 'tests/results/', 'results/']
   end
 
   def name_node(node, name)

http://git-wip-us.apache.org/repos/asf/kafka/blob/e4fc456a/vagrant/README.md
--
diff --git a/vagrant/README.md b/vagrant/README.md
index 73cf039..6fa8e78 100644
--- a/vagrant/README.md
+++ b/vagrant/README.md
@@ -6,8 +6,8 @@ Using Vagrant to get up and running.
 2) Install Vagrant >= 1.6.4 
[http://www.vagrantup.com/](http://www.vagrantup.com/)
 3) Install Vagrant Plugins:
 
-# Required
-$ vagrant plugin install vagrant-hostmanager
+# Required (1.5.0 or 1.4 currently required due to implementation changes 
in the plugin)
+$ vagrant plugin install vagrant-hostmanager --plugin-version 1.5.0
 # Optional
 
 $ vagrant plugin install vagrant-cachier # Caches & shares package downloads across VMs
 



kafka git commit: KAFKA-2457; Fix how the argument is passed to `compileScala`

2015-08-21 Thread gwenshap
Repository: kafka
Updated Branches:
  refs/heads/trunk 3285a9c89 -> 6acd37720


KAFKA-2457; Fix how the argument is passed to `compileScala`

Author: Ismael Juma ism...@juma.me.uk

Reviewers: Gwen Shapira

Closes #159 from ijuma/kafka-2457-fix


Project: http://git-wip-us.apache.org/repos/asf/kafka/repo
Commit: http://git-wip-us.apache.org/repos/asf/kafka/commit/6acd3772
Tree: http://git-wip-us.apache.org/repos/asf/kafka/tree/6acd3772
Diff: http://git-wip-us.apache.org/repos/asf/kafka/diff/6acd3772

Branch: refs/heads/trunk
Commit: 6acd37720df402a665094ba1153ae200ad5d6705
Parents: 3285a9c
Author: Ismael Juma ism...@juma.me.uk
Authored: Fri Aug 21 12:52:30 2015 -0700
Committer: Gwen Shapira csh...@gmail.com
Committed: Fri Aug 21 12:52:30 2015 -0700

--
 build.gradle | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/kafka/blob/6acd3772/build.gradle
--
diff --git a/build.gradle b/build.gradle
index 228cbe9..3fd54cf 100644
--- a/build.gradle
+++ b/build.gradle
@@ -169,7 +169,7 @@ subprojects {
 
 configure(scalaCompileOptions.forkOptions) {
   memoryMaximumSize = '1g'
-  jvmArgs = ['-XX:MaxPermSize=512m -Xss2m']
+  jvmArgs = ['-XX:MaxPermSize=512m', '-Xss2m']
 }
   }
 }
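
The one-character change matters because Gradle passes each list element to the forked compiler JVM as a single argument: the quoted pair of options in one string arrives as one malformed option. A small Java illustration of the difference:

    import java.util.Arrays;
    import java.util.List;

    public final class ArgvSketch {
        public static void main(String[] args) {
            // Broken: ONE argv entry, "-XX:MaxPermSize=512m -Xss2m", which no
            // JVM option parser accepts.
            List<String> broken = Arrays.asList("-XX:MaxPermSize=512m -Xss2m");
            // Fixed: two separate argv entries, as in the commit above.
            List<String> fixed = Arrays.asList("-XX:MaxPermSize=512m", "-Xss2m");
            System.out.println(broken.size() + " vs " + fixed.size()); // prints "1 vs 2"
        }
    }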



kafka git commit: KAFKA-2439; Add MirrorMaker service class for system tests

2015-08-22 Thread gwenshap
Repository: kafka
Updated Branches:
  refs/heads/trunk 6acd37720 -> 1d2ae89c5


KAFKA-2439; Add MirrorMaker service class for system tests

Added MirrorMaker service and a few corresponding sanity checks, as well as 
necessary config template files. A few additional updates to accommodate the 
change in wait_until from ducktape 0.2.0 -> 0.3.0

Author: Geoff Anderson ge...@confluent.io

Reviewers: Ewen Cheslack-Postava, Gwen Shapira

Closes #148 from granders/KAFKA-2439 and squashes the following commits:

c7c3ebd [Geoff Anderson] MirrorMaker now can run as multi-node service. Added 
kill -9 to various clean_node methods.
1e806f2 [Geoff Anderson] Various cleanups per review.
1b4b049 [Geoff Anderson] Added MirrorMaker service and a few corresponding 
sanity checks, as well as necessary config template files. A few additional 
updates to accommodate the change in wait_until from ducktape 0.2.0 -> 0.3.0


Project: http://git-wip-us.apache.org/repos/asf/kafka/repo
Commit: http://git-wip-us.apache.org/repos/asf/kafka/commit/1d2ae89c
Tree: http://git-wip-us.apache.org/repos/asf/kafka/tree/1d2ae89c
Diff: http://git-wip-us.apache.org/repos/asf/kafka/diff/1d2ae89c

Branch: refs/heads/trunk
Commit: 1d2ae89c5a1dc5d18b8188bf737a8e1d195be325
Parents: 6acd377
Author: Geoff Anderson ge...@confluent.io
Authored: Sat Aug 22 19:23:36 2015 -0700
Committer: Gwen Shapira csh...@gmail.com
Committed: Sat Aug 22 19:23:36 2015 -0700

--
 .../sanity_checks/test_console_consumer.py  |  12 +-
 .../sanity_checks/test_mirror_maker.py  |  90 ++
 tests/kafkatest/services/console_consumer.py|  17 +-
 tests/kafkatest/services/kafka.py   |   1 +
 tests/kafkatest/services/mirror_maker.py| 165 +++
 .../templates/console_consumer.properties   |   4 +-
 .../templates/console_consumer_log4j.properties |  26 ---
 .../services/templates/consumer.properties  |  23 +++
 .../services/templates/kafka.properties |  80 -
 .../services/templates/producer.properties  |  28 
 .../services/templates/tools_log4j.properties   |  26 +++
 tests/kafkatest/services/verifiable_producer.py |   4 +
 tests/kafkatest/services/zookeeper.py   |  16 ++
 tests/kafkatest/tests/replication_test.py   |   8 +-
 tests/setup.py  |   2 +-
 15 files changed, 379 insertions(+), 123 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/kafka/blob/1d2ae89c/tests/kafkatest/sanity_checks/test_console_consumer.py
--
diff --git a/tests/kafkatest/sanity_checks/test_console_consumer.py 
b/tests/kafkatest/sanity_checks/test_console_consumer.py
index cd8c8f9..3e523e1 100644
--- a/tests/kafkatest/sanity_checks/test_console_consumer.py
+++ b/tests/kafkatest/sanity_checks/test_console_consumer.py
@@ -61,20 +61,20 @@ class ConsoleConsumerTest(Test):
 self.consumer.start()
 node = self.consumer.nodes[0]
 
-        if not wait_until(lambda: self.consumer.alive(node), timeout_sec=10, backoff_sec=.2):
-            raise Exception("Consumer was too slow to start")
+        wait_until(lambda: self.consumer.alive(node),
+                   timeout_sec=10, backoff_sec=.2, err_msg="Consumer was too slow to start")
         self.logger.info("consumer started in %s seconds " % str(time.time() - t0))
 
         # Verify that log output is happening
-        if not wait_until(lambda: file_exists(node, ConsoleConsumer.LOG_FILE), timeout_sec=10):
-            raise Exception("Timed out waiting for log file to exist")
+        wait_until(lambda: file_exists(node, ConsoleConsumer.LOG_FILE), timeout_sec=10,
+                   err_msg="Timed out waiting for logging to start.")
         assert line_count(node, ConsoleConsumer.LOG_FILE) > 0
 
 # Verify no consumed messages
 assert line_count(node, ConsoleConsumer.STDOUT_CAPTURE) == 0
 
 self.consumer.stop_node(node)
-        if not wait_until(lambda: not self.consumer.alive(node), timeout_sec=10, backoff_sec=.2):
-            raise Exception("Took too long for consumer to die.")
+        wait_until(lambda: not self.consumer.alive(node),
+                   timeout_sec=10, backoff_sec=.2, err_msg="Took too long for consumer to die.")
 
 

http://git-wip-us.apache.org/repos/asf/kafka/blob/1d2ae89c/tests/kafkatest/sanity_checks/test_mirror_maker.py
--
diff --git a/tests/kafkatest/sanity_checks/test_mirror_maker.py 
b/tests/kafkatest/sanity_checks/test_mirror_maker.py
new file mode 100644
index 000..3481d7a
--- /dev/null
+++ b/tests/kafkatest/sanity_checks/test_mirror_maker.py
@@ -0,0 +1,90 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); 

kafka git commit: KAFKA-2438; add maxParallelForks to build.gradle to speedup tests.

2015-08-16 Thread gwenshap
Repository: kafka
Updated Branches:
  refs/heads/trunk 63b89658b -> 786867c2e


KAFKA-2438; add maxParallelForks to build.gradle to speedup tests.

Author: Sriharsha Chintalapani har...@hortonworks.com

Reviewers: Gwen Shapira

Closes #143 from harshach/KAFKA-2438


Project: http://git-wip-us.apache.org/repos/asf/kafka/repo
Commit: http://git-wip-us.apache.org/repos/asf/kafka/commit/786867c2
Tree: http://git-wip-us.apache.org/repos/asf/kafka/tree/786867c2
Diff: http://git-wip-us.apache.org/repos/asf/kafka/diff/786867c2

Branch: refs/heads/trunk
Commit: 786867c2e18f79fa17be120f78a253bb9822a861
Parents: 63b8965
Author: Sriharsha Chintalapani har...@hortonworks.com
Authored: Sun Aug 16 20:53:39 2015 -0700
Committer: Gwen Shapira csh...@gmail.com
Committed: Sun Aug 16 20:53:39 2015 -0700

--
 build.gradle | 4 ++++
 1 file changed, 4 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/kafka/blob/786867c2/build.gradle
--
diff --git a/build.gradle b/build.gradle
index c7f66be..983587f 100644
--- a/build.gradle
+++ b/build.gradle
@@ -114,6 +114,10 @@ subprojects {
 }
   }
 
+  tasks.withType(Test) {
+maxParallelForks = Runtime.runtime.availableProcessors()
+  }
+
   jar {
 from '../LICENSE'
 from '../NOTICE'



kafka git commit: KAFKA-2100; Client Error doesn't preserve or display original server error code when it is an unknown code; Reviewed by Gwen, Guozhang and Ewen

2015-07-29 Thread gwenshap
Repository: kafka
Updated Branches:
  refs/heads/trunk e43c9aff9 -> b7bd2978d


KAFKA-2100; Client Error doesn't preserve or display original server error code 
when it is an unknown code; Reviewed by Gwen, Guozhang and Ewen


Project: http://git-wip-us.apache.org/repos/asf/kafka/repo
Commit: http://git-wip-us.apache.org/repos/asf/kafka/commit/b7bd2978
Tree: http://git-wip-us.apache.org/repos/asf/kafka/tree/b7bd2978
Diff: http://git-wip-us.apache.org/repos/asf/kafka/diff/b7bd2978

Branch: refs/heads/trunk
Commit: b7bd2978dc3947297fefc06ff9b22949d5bd1b50
Parents: e43c9af
Author: David Jacot david.ja...@gmail.com
Authored: Wed Jul 29 10:34:42 2015 -0700
Committer: Gwen Shapira csh...@gmail.com
Committed: Wed Jul 29 10:34:42 2015 -0700

--
 .../java/org/apache/kafka/common/protocol/Errors.java  | 13 +++++++++++--
 1 file changed, 11 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/kafka/blob/b7bd2978/clients/src/main/java/org/apache/kafka/common/protocol/Errors.java
--
diff --git a/clients/src/main/java/org/apache/kafka/common/protocol/Errors.java 
b/clients/src/main/java/org/apache/kafka/common/protocol/Errors.java
index d6c41c1..e17e390 100644
--- a/clients/src/main/java/org/apache/kafka/common/protocol/Errors.java
+++ b/clients/src/main/java/org/apache/kafka/common/protocol/Errors.java
@@ -20,6 +20,8 @@ import java.util.HashMap;
 import java.util.Map;
 
 import org.apache.kafka.common.errors.*;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * This class contains all the client-server errors--those errors that must be 
sent from the server to the client. These
@@ -83,6 +85,8 @@ public enum Errors {
     INVALID_COMMIT_OFFSET_SIZE(28,
             new ApiException("The committing offset data size is not valid"));
 
+private static final Logger log = LoggerFactory.getLogger(Errors.class);
+
     private static Map<Class<?>, Errors> classToError = new HashMap<Class<?>, Errors>();
     private static Map<Short, Errors> codeToError = new HashMap<Short, Errors>();
 
@@ -130,11 +134,16 @@ public enum Errors {
  */
     public static Errors forCode(short code) {
         Errors error = codeToError.get(code);
-        return error == null ? UNKNOWN : error;
+        if (error != null) {
+            return error;
+        } else {
+            log.warn("Unexpected error code: {}.", code);
+            return UNKNOWN;
+        }
     }
 
 /**
- * Return the error instance associated with this exception (or UKNOWN if 
there is none)
+ * Return the error instance associated with this exception (or UNKNOWN if 
there is none)
  */
 public static Errors forException(Throwable t) {
 Errors error = classToError.get(t.getClass());
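
The behavior change is easy to exercise: an unrecognized code still maps to UNKNOWN, but the original code is now logged instead of silently discarded. A short illustration (the code value is hypothetical):

    import org.apache.kafka.common.protocol.Errors;

    public final class ForCodeSketch {
        public static void main(String[] args) {
            short unknownCode = 87; // hypothetical code with no Errors mapping
            Errors e = Errors.forCode(unknownCode);
            // The fallback is unchanged; the difference is the warn log emitted above.
            System.out.println(e); // UNKNOWN
        }
    }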



kafka git commit: KAFKA-2386; increase timeouts for transient test failure in ConsumerCoordinatorResponseTests

2015-08-03 Thread gwenshap
Repository: kafka
Updated Branches:
  refs/heads/trunk cd3dc7a5c -> 3c0963084


KAFKA-2386; increase timeouts for transient test failure in 
ConsumerCoordinatorResponseTests

There are two race conditions in the test case 
testGenerationIdIncrementsOnRebalance. First, a delay before the second join 
group request can timeout the initial group and cause the generationId to 
unexpectedly reset. Second, a delay in the join group request handling will 
timeout the request itself and cause the test to fail.  This commit doesn't 
address these race conditions, but increases the timeouts to make them more 
unlikely. If the problem reoccurs, then we'll probably need a better solution.

Author: Jason Gustafson ja...@confluent.io

Reviewers: Gwen Shapira csh...@gmail.com

Closes #107 from hachikuji/KAFKA-2386 and squashes the following commits:

a53460a [Jason Gustafson] KAFKA-2386; increase timeouts for transient test 
failure in ConsumerCoordinatorResponseTest


Project: http://git-wip-us.apache.org/repos/asf/kafka/repo
Commit: http://git-wip-us.apache.org/repos/asf/kafka/commit/3c096308
Tree: http://git-wip-us.apache.org/repos/asf/kafka/tree/3c096308
Diff: http://git-wip-us.apache.org/repos/asf/kafka/diff/3c096308

Branch: refs/heads/trunk
Commit: 3c09630844f6e70793f53a9d4f0ef562fe9d91d3
Parents: cd3dc7a
Author: Jason Gustafson ja...@confluent.io
Authored: Mon Aug 3 15:42:33 2015 -0700
Committer: Chen Shapira g...@macbook-pro.gateway.sonic.net
Committed: Mon Aug 3 15:42:33 2015 -0700

--
 .../kafka/coordinator/ConsumerCoordinatorResponseTest.scala | 9 +++++----
 1 file changed, 5 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/kafka/blob/3c096308/core/src/test/scala/unit/kafka/coordinator/ConsumerCoordinatorResponseTest.scala
--
diff --git 
a/core/src/test/scala/unit/kafka/coordinator/ConsumerCoordinatorResponseTest.scala
 
b/core/src/test/scala/unit/kafka/coordinator/ConsumerCoordinatorResponseTest.scala
index 87a5330..058daef 100644
--- 
a/core/src/test/scala/unit/kafka/coordinator/ConsumerCoordinatorResponseTest.scala
+++ 
b/core/src/test/scala/unit/kafka/coordinator/ConsumerCoordinatorResponseTest.scala
@@ -43,8 +43,8 @@ class ConsumerCoordinatorResponseTest extends JUnitSuite {
   type HeartbeatCallback = Short => Unit
 
   val ConsumerMinSessionTimeout = 10
-  val ConsumerMaxSessionTimeout = 100
-  val DefaultSessionTimeout = 20
+  val ConsumerMaxSessionTimeout = 200
+  val DefaultSessionTimeout = 100
   var consumerCoordinator: ConsumerCoordinator = null
   var offsetManager : OffsetManager = null
 
@@ -238,7 +238,7 @@ class ConsumerCoordinatorResponseTest extends JUnitSuite {
 
 // First start up a group (with a slightly larger timeout to give us time 
to heartbeat when the rebalance starts)
 val joinGroupResult = joinGroup(groupId, 
JoinGroupRequest.UNKNOWN_CONSUMER_ID, partitionAssignmentStrategy,
-  100, isCoordinatorForGroup = true)
+  DefaultSessionTimeout, isCoordinatorForGroup = true)
 val assignedConsumerId = joinGroupResult._2
 val initialGenerationId = joinGroupResult._3
 val joinGroupErrorCode = joinGroupResult._4
@@ -310,7 +310,8 @@ class ConsumerCoordinatorResponseTest extends JUnitSuite {
 sessionTimeout: Int,
 isCoordinatorForGroup: Boolean): 
JoinGroupCallbackParams = {
 val responseFuture = sendJoinGroup(groupId, consumerId, 
partitionAssignmentStrategy, sessionTimeout, isCoordinatorForGroup)
-Await.result(responseFuture, Duration(40, TimeUnit.MILLISECONDS))
+// should only have to wait as long as session timeout, but allow some 
extra time in case of an unexpected delay
+Await.result(responseFuture, Duration(sessionTimeout+100, 
TimeUnit.MILLISECONDS))
   }
 
   private def heartbeat(groupId: String,



kafka git commit: KAFKA-2407: Only create log directory when it will be used

2015-08-05 Thread gwenshap
Repository: kafka
Updated Branches:
  refs/heads/trunk 9cefb2a0f -> fc40016cf


KAFKA-2407: Only create log directory when it will be used

Author: Grant Henke granthe...@gmail.com

Reviewers: Gwen Shapira

Closes #115 from granthenke/log-fix and squashes the following commits:

de36138 [Grant Henke] Small comment fix
49a8dd4 [Grant Henke] KAFKA-2407: Only create log directory when it will be used


Project: http://git-wip-us.apache.org/repos/asf/kafka/repo
Commit: http://git-wip-us.apache.org/repos/asf/kafka/commit/fc40016c
Tree: http://git-wip-us.apache.org/repos/asf/kafka/tree/fc40016c
Diff: http://git-wip-us.apache.org/repos/asf/kafka/diff/fc40016c

Branch: refs/heads/trunk
Commit: fc40016cf3cc4143f2e8486722737da3461771e6
Parents: 9cefb2a
Author: Grant Henke granthe...@gmail.com
Authored: Wed Aug 5 11:46:11 2015 -0700
Committer: Gwen Shapira csh...@gmail.com
Committed: Wed Aug 5 11:46:11 2015 -0700

--
 bin/kafka-run-class.sh | 20 +++-
 1 file changed, 11 insertions(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/kafka/blob/fc40016c/bin/kafka-run-class.sh
--
diff --git a/bin/kafka-run-class.sh b/bin/kafka-run-class.sh
index ebe7409..2f00f68 100755
--- a/bin/kafka-run-class.sh
+++ b/bin/kafka-run-class.sh
@@ -22,15 +22,6 @@ fi
 
 base_dir=$(dirname $0)/..
 
-# create logs directory
-if [ "x$LOG_DIR" = "x" ]; then
-    LOG_DIR="$base_dir/logs"
-fi
-
-if [ ! -d "$LOG_DIR" ]; then
-    mkdir -p "$LOG_DIR"
-fi
-
 if [ -z "$SCALA_VERSION" ]; then
SCALA_VERSION=2.10.5
 fi
@@ -96,9 +87,20 @@ if [  $JMX_PORT ]; then
   KAFKA_JMX_OPTS="$KAFKA_JMX_OPTS -Dcom.sun.management.jmxremote.port=$JMX_PORT "
 fi
 
+# Log directory to use
+if [ "x$LOG_DIR" = "x" ]; then
+    LOG_DIR="$base_dir/logs"
+fi
+
 # Log4j settings
 if [ -z "$KAFKA_LOG4J_OPTS" ]; then
+  # Log to console. This is a tool.
   KAFKA_LOG4J_OPTS="-Dlog4j.configuration=file:$base_dir/config/tools-log4j.properties"
+else
+  # create logs directory
+  if [ ! -d "$LOG_DIR" ]; then
+    mkdir -p "$LOG_DIR"
+  fi
 fi
 
 KAFKA_LOG4J_OPTS="-Dkafka.logs.dir=$LOG_DIR $KAFKA_LOG4J_OPTS"



kafka git commit: MINOR: Added to .gitignore Kafka server logs directory

2015-08-03 Thread gwenshap
Repository: kafka
Updated Branches:
  refs/heads/trunk b152c0604 -> cd3dc7a5c


MINOR: Added to .gitignore Kafka server logs directory

When running Kafka server from sources, logs directory gets created in root of 
repository, and kafka server logs end up there. Currently that directory is not 
ignored by git.

This change adds root logs directory to .gitignore so that Kafka server logs 
are ignored and do not get tracked by git.

Author: Stevo Slavić ssla...@gmail.com

Reviewers: Ismael Juma

Closes #94 from sslavic/patch-7 and squashes the following commits:

c7b62a7 [Stevo Slavić] MINOR: Added to .gitignore Kafka server logs


Project: http://git-wip-us.apache.org/repos/asf/kafka/repo
Commit: http://git-wip-us.apache.org/repos/asf/kafka/commit/cd3dc7a5
Tree: http://git-wip-us.apache.org/repos/asf/kafka/tree/cd3dc7a5
Diff: http://git-wip-us.apache.org/repos/asf/kafka/diff/cd3dc7a5

Branch: refs/heads/trunk
Commit: cd3dc7a5c9f3a68ad73d2ae7c975f9882f00036e
Parents: b152c06
Author: Stevo Slavić ssla...@gmail.com
Authored: Mon Aug 3 14:12:00 2015 -0700
Committer: Chen Shapira g...@macbook-pro.gateway.sonic.net
Committed: Mon Aug 3 14:12:00 2015 -0700

--
 .gitignore | 1 +
 1 file changed, 1 insertion(+)
--


http://git-wip-us.apache.org/repos/asf/kafka/blob/cd3dc7a5/.gitignore
--
diff --git a/.gitignore b/.gitignore
index 4aae6e7..dbc0507 100644
--- a/.gitignore
+++ b/.gitignore
@@ -24,6 +24,7 @@ kafka.ipr
 kafka.iws
 .vagrant
 Vagrantfile.local
+/logs
 
 config/server-*
 config/zookeeper-*



kafka git commit: KAFKA-2405; Don't kill the JVM on session establishment failure

2015-08-04 Thread gwenshap
Repository: kafka
Updated Branches:
  refs/heads/trunk a56a79055 -> 7a666f7aa


KAFKA-2405; Don't kill the JVM on session establishment failure

As noted in the JIRA https://issues.apache.org/jira/browse/KAFKA-2405 currently 
the KafkaHealthCheck causes the JVM to terminate in cases where session 
establishment with Zookeeper fails. I don't know if retrying (after a while) is 
a better way to fix this but at least, IMO, the session establishment failure 
shouldn't kill the JVM. This commit removes the `System.exit()` call.

Author: Jaikiran Pai jaikiran@gmail.com

Reviewers: Gwen Shapira csh...@gmail.com

Closes #111 from jaikiran/kafka-2405 and squashes the following commits:

0255fdb [Jaikiran Pai] KAFKA-2405 Don't kill the JVM on session establishment 
failure


Project: http://git-wip-us.apache.org/repos/asf/kafka/repo
Commit: http://git-wip-us.apache.org/repos/asf/kafka/commit/7a666f7a
Tree: http://git-wip-us.apache.org/repos/asf/kafka/tree/7a666f7a
Diff: http://git-wip-us.apache.org/repos/asf/kafka/diff/7a666f7a

Branch: refs/heads/trunk
Commit: 7a666f7aa8b1be927579817187e0b5b93543b5e2
Parents: a56a790
Author: Jaikiran Pai jaikiran@gmail.com
Authored: Tue Aug 4 17:10:02 2015 -0700
Committer: Chen Shapira g...@chens-mbp.gateway.sonic.net
Committed: Tue Aug 4 17:10:02 2015 -0700

--
 core/src/main/scala/kafka/controller/KafkaController.scala | 2 +-
 core/src/main/scala/kafka/server/KafkaHealthcheck.scala| 1 -
 2 files changed, 1 insertion(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/kafka/blob/7a666f7a/core/src/main/scala/kafka/controller/KafkaController.scala
--
diff --git a/core/src/main/scala/kafka/controller/KafkaController.scala 
b/core/src/main/scala/kafka/controller/KafkaController.scala
index 6844602..b19e57f 100755
--- a/core/src/main/scala/kafka/controller/KafkaController.scala
+++ b/core/src/main/scala/kafka/controller/KafkaController.scala
@@ -1138,7 +1138,7 @@ class KafkaController(val config : KafkaConfig, zkClient: 
ZkClient, val brokerSt
 }
 
 override def handleSessionEstablishmentError(error: Throwable): Unit = {
-  //no-op handleSessionEstablishmentError in KafkaHealthCheck should 
System.exit and log the error.
+  //no-op handleSessionEstablishmentError in KafkaHealthCheck should 
handle this error in its handleSessionEstablishmentError
 }
   }
 

http://git-wip-us.apache.org/repos/asf/kafka/blob/7a666f7a/core/src/main/scala/kafka/server/KafkaHealthcheck.scala
--
diff --git a/core/src/main/scala/kafka/server/KafkaHealthcheck.scala 
b/core/src/main/scala/kafka/server/KafkaHealthcheck.scala
index ea0c996..e6e270b 100644
--- a/core/src/main/scala/kafka/server/KafkaHealthcheck.scala
+++ b/core/src/main/scala/kafka/server/KafkaHealthcheck.scala
@@ -92,7 +92,6 @@ class KafkaHealthcheck(private val brokerId: Int,
 
 override def handleSessionEstablishmentError(error: Throwable): Unit = {
       fatal("Could not establish session with zookeeper", error)
-  System.exit(-1)
 }
   }
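
After this change the listener merely logs and returns, leaving recovery policy to the operator. A hedged Java sketch of the resulting contract, written against the zkclient IZkStateListener interface the Scala code implements:

    import org.I0Itec.zkclient.IZkStateListener;
    import org.apache.zookeeper.Watcher.Event.KeeperState;
    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    // Illustrative only: mirrors the post-KAFKA-2405 behavior.
    public class HealthcheckListenerSketch implements IZkStateListener {
        private static final Logger log = LoggerFactory.getLogger(HealthcheckListenerSketch.class);

        @Override
        public void handleStateChanged(KeeperState state) { }

        @Override
        public void handleNewSession() { /* re-register the broker znode */ }

        @Override
        public void handleSessionEstablishmentError(Throwable error) {
            log.error("Could not establish session with zookeeper", error);
            // Previously: System.exit(-1). Now the JVM survives the failure.
        }
    }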
 



[1/7] kafka git commit: KAFKA-2366; Initial patch for Copycat

2015-08-14 Thread gwenshap
Repository: kafka
Updated Branches:
  refs/heads/trunk c8e62c981 -> f6acfb089


http://git-wip-us.apache.org/repos/asf/kafka/blob/f6acfb08/copycat/runtime/src/test/java/org/apache/kafka/copycat/util/TestBackgroundThreadExceptionHandler.java
--
diff --git 
a/copycat/runtime/src/test/java/org/apache/kafka/copycat/util/TestBackgroundThreadExceptionHandler.java
 
b/copycat/runtime/src/test/java/org/apache/kafka/copycat/util/TestBackgroundThreadExceptionHandler.java
new file mode 100644
index 000..5dc6d33
--- /dev/null
+++ 
b/copycat/runtime/src/test/java/org/apache/kafka/copycat/util/TestBackgroundThreadExceptionHandler.java
@@ -0,0 +1,37 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * <p/>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p/>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ **/
+
+package org.apache.kafka.copycat.util;
+
+/**
+ * An UncaughtExceptionHandler that can be registered with one or more threads 
which tracks the
+ * first exception so the main thread can check for uncaught exceptions.
+ */
+public class TestBackgroundThreadExceptionHandler implements Thread.UncaughtExceptionHandler {
+    private Throwable firstException = null;
+
+    @Override
+    public void uncaughtException(Thread t, Throwable e) {
+        if (this.firstException == null)
+            this.firstException = e;
+    }
+
+    public void verifyNoExceptions() {
+        if (this.firstException != null)
+            throw new AssertionError(this.firstException);
+    }
+}

http://git-wip-us.apache.org/repos/asf/kafka/blob/f6acfb08/copycat/runtime/src/test/java/org/apache/kafka/copycat/util/ThreadedTest.java
--
diff --git 
a/copycat/runtime/src/test/java/org/apache/kafka/copycat/util/ThreadedTest.java 
b/copycat/runtime/src/test/java/org/apache/kafka/copycat/util/ThreadedTest.java
new file mode 100644
index 000..ed99247
--- /dev/null
+++ 
b/copycat/runtime/src/test/java/org/apache/kafka/copycat/util/ThreadedTest.java
@@ -0,0 +1,43 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * <p/>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p/>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ **/
+
+package org.apache.kafka.copycat.util;
+
+import org.junit.After;
+import org.junit.Before;
+
+/**
+ * Base class for tests that use threads. It sets up uncaught exception 
handlers for all known
+ * thread classes and checks for errors at the end of the test so that 
failures in background
+ * threads will cause the test to fail.
+ */
+public class ThreadedTest {
+
+    protected TestBackgroundThreadExceptionHandler backgroundThreadExceptionHandler;
+
+    @Before
+    public void setup() {
+        backgroundThreadExceptionHandler = new TestBackgroundThreadExceptionHandler();
+        ShutdownableThread.funcaughtExceptionHandler = backgroundThreadExceptionHandler;
+    }
+
+    @After
+    public void teardown() {
+        backgroundThreadExceptionHandler.verifyNoExceptions();
+        ShutdownableThread.funcaughtExceptionHandler = null;
+    }
+}
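
A hypothetical subclass shows how the base class is meant to be used: any exception thrown on a registered background thread surfaces as a test failure when teardown() calls verifyNoExceptions():

    import org.junit.Test;

    // Hypothetical example test, not part of the patch.
    public class ExampleThreadedTest extends ThreadedTest {
        @Test
        public void backgroundWorkCompletesCleanly() throws InterruptedException {
            Thread worker = new Thread(new Runnable() {
                @Override
                public void run() {
                    // work under test; an uncaught throw here fails the test in teardown()
                }
            });
            worker.setUncaughtExceptionHandler(backgroundThreadExceptionHandler);
            worker.start();
            worker.join();
        }
    }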

http://git-wip-us.apache.org/repos/asf/kafka/blob/f6acfb08/settings.gradle
--
diff --git a/settings.gradle b/settings.gradle
index 1944917..27ae98f 100644
--- a/settings.gradle
+++ b/settings.gradle
@@ -14,5 +14,5 @@
 // limitations under the License.
 
 apply from: file('scala.gradle')
-include 'core', 'contrib:hadoop-consumer', 'contrib:hadoop-producer', 
'examples', 'clients', 'tools', 'log4j-appender'
-
+include 'core', 'contrib:hadoop-consumer', 

[4/7] kafka git commit: KAFKA-2366; Initial patch for Copycat

2015-08-14 Thread gwenshap
http://git-wip-us.apache.org/repos/asf/kafka/blob/f6acfb08/copycat/file/src/main/java/org/apache/kafka/copycat/file/FileStreamSinkConnector.java
--
diff --git 
a/copycat/file/src/main/java/org/apache/kafka/copycat/file/FileStreamSinkConnector.java
 
b/copycat/file/src/main/java/org/apache/kafka/copycat/file/FileStreamSinkConnector.java
new file mode 100644
index 000..e41364e
--- /dev/null
+++ 
b/copycat/file/src/main/java/org/apache/kafka/copycat/file/FileStreamSinkConnector.java
@@ -0,0 +1,62 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ **/
+
+package org.apache.kafka.copycat.file;
+
+import org.apache.kafka.copycat.connector.Task;
+import org.apache.kafka.copycat.sink.SinkConnector;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Properties;
+
+/**
+ * Very simple connector that works with the console. This connector supports 
both source and
+ * sink modes via its 'mode' setting.
+ */
+public class FileStreamSinkConnector extends SinkConnector {
+    public static final String FILE_CONFIG = "file";
+
+    private String filename;
+
+    @Override
+    public void start(Properties props) {
+        filename = props.getProperty(FILE_CONFIG);
+    }
+
+    @Override
+    public Class<? extends Task> getTaskClass() {
+        return FileStreamSinkTask.class;
+    }
+
+    @Override
+    public List<Properties> getTaskConfigs(int maxTasks) {
+        ArrayList<Properties> configs = new ArrayList<>();
+        for (int i = 0; i < maxTasks; i++) {
+            Properties config = new Properties();
+            if (filename != null)
+                config.setProperty(FILE_CONFIG, filename);
+            configs.add(config);
+        }
+        return configs;
+    }
+
+    @Override
+    public void stop() {
+        // Nothing to do since FileStreamSinkConnector has no background monitoring.
+    }
+}
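
Driving the connector by hand makes the fan-out concrete: every task receives an identical config carrying the file setting. An illustrative snippet (the path is a placeholder):

    import java.util.List;
    import java.util.Properties;
    import org.apache.kafka.copycat.file.FileStreamSinkConnector;

    public final class SinkConnectorSketch {
        public static void main(String[] args) {
            FileStreamSinkConnector connector = new FileStreamSinkConnector();
            Properties props = new Properties();
            props.setProperty(FileStreamSinkConnector.FILE_CONFIG, "/tmp/sink.txt"); // placeholder path
            connector.start(props);
            List<Properties> taskConfigs = connector.getTaskConfigs(3);
            System.out.println(taskConfigs.size()); // 3 identical task configs
            connector.stop();
        }
    }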

http://git-wip-us.apache.org/repos/asf/kafka/blob/f6acfb08/copycat/file/src/main/java/org/apache/kafka/copycat/file/FileStreamSinkTask.java
--
diff --git 
a/copycat/file/src/main/java/org/apache/kafka/copycat/file/FileStreamSinkTask.java
 
b/copycat/file/src/main/java/org/apache/kafka/copycat/file/FileStreamSinkTask.java
new file mode 100644
index 000..7e4ca7e
--- /dev/null
+++ 
b/copycat/file/src/main/java/org/apache/kafka/copycat/file/FileStreamSinkTask.java
@@ -0,0 +1,79 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ **/
+
+package org.apache.kafka.copycat.file;
+
+import org.apache.kafka.common.TopicPartition;
+import org.apache.kafka.copycat.errors.CopycatException;
+import org.apache.kafka.copycat.sink.SinkRecord;
+import org.apache.kafka.copycat.sink.SinkTask;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.File;
+import java.io.FileNotFoundException;
+import java.io.PrintStream;
+import java.util.Collection;
+import java.util.Map;
+import java.util.Properties;
+
+/**
+ * FileStreamSinkTask writes records to stdout or a file.
+ */
+public class FileStreamSinkTask extends SinkTask {
+    private static final Logger log = LoggerFactory.getLogger(FileStreamSinkTask.class);
+
+    private PrintStream outputStream;
+
+    public FileStreamSinkTask() {
+    }
+
+    // for testing
+    public FileStreamSinkTask(PrintStream outputStream) {
+        this.outputStream = outputStream;
+    }
+
+    @Override
+    public 
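
The message is truncated at this override. Given the fields above, a hedged sketch of how such a put() plausibly completes (the record accessor name is an assumption, not taken from the truncated diff):

    // Hypothetical completion of the truncated override: write each record's
    // value to the configured stream.
    @Override
    public void put(Collection<SinkRecord> sinkRecords) {
        for (SinkRecord record : sinkRecords)
            outputStream.println(record.value());  // accessor name assumed
    }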

[7/7] kafka git commit: KAFKA-2366; Initial patch for Copycat

2015-08-14 Thread gwenshap
KAFKA-2366; Initial patch for Copycat

This is an initial patch implementing the basics of Copycat for KIP-26.

The intent here is to start a review of the key pieces of the core API and get 
a reasonably functional, baseline, non-distributed implementation of Copycat in 
place to get things rolling. The current patch has a number of known issues 
that need to be addressed before a final version:

* Some build-related issues. Specifically, requires some locally-installed 
dependencies (see below), ignores checkstyle for the runtime data library 
because it's lifted from Avro currently and likely won't last in its current 
form, and some Gradle task dependencies aren't quite right because I haven't 
gotten rid of the dependency on `core` (which should now be an easy patch since 
new consumer groups are in a much better state).
* This patch currently depends on some Confluent trunk code because I 
prototyped with our Avro serializers w/ schema-registry support. We need to 
figure out what we want to provide as an example built-in set of serializers. 
Unlike core Kafka where we could ignore the issue, providing only ByteArray or 
String serializers, this is pretty central to how Copycat works.
* This patch uses a hacked up version of Avro as its runtime data format. Not 
sure if we want to go through the entire API discussion just to get some basic 
code committed, so I filed KAFKA-2367 to handle that separately. The core 
connector APIs and the runtime data APIs are entirely orthogonal.
* This patch needs some updates to get aligned with recent new consumer changes 
(specifically, I'm aware of the ConcurrentModificationException issue on exit). 
More generally, the new consumer is in flux but Copycat depends on it, so there 
are likely to be some negative interactions.
* The layout feels a bit awkward to me right now because I ported it from a 
Maven layout. We don't have nearly the same level of granularity in Kafka 
currently (core and clients, plus the mostly ignored examples, log4j-appender, 
and a couple of contribs). We might want to reorganize, although keeping 
data+api separate from runtime and connector plugins is useful for minimizing 
dependencies.
* There are a variety of other things (e.g., I'm not happy with the exception 
hierarchy/how they are currently handled, TopicPartition doesn't really need to 
be duplicated unless we want Copycat entirely isolated from the Kafka APIs, 
etc), but I expect those we'll cover in the review.

Before commenting on the patch, it's probably worth reviewing 
https://issues.apache.org/jira/browse/KAFKA-2365 and 
https://issues.apache.org/jira/browse/KAFKA-2366 to get an idea of what I had 
in mind for a) what we ultimately want with all the Copycat patches and b) what 
we aim to cover in this initial patch. My hope is that we can use a WIP patch 
(after the current obvious deficiencies are addressed) while recognizing that 
we want to make iterative progress with a bunch of subsequent PRs.

Author: Ewen Cheslack-Postava m...@ewencp.org

Reviewers: Ismael Juma, Gwen Shapira

Closes #99 from ewencp/copycat and squashes the following commits:

a3a47a6 [Ewen Cheslack-Postava] Simplify Copycat exceptions, make them a 
subclass of KafkaException.
8c108b0 [Ewen Cheslack-Postava] Rename Coordinator to Herder to avoid confusion 
with the consumer coordinator.
7bf8075 [Ewen Cheslack-Postava] Make Copycat CLI specific to standalone mode, 
clean up some config and get rid of config storage in standalone mode.
656a003 [Ewen Cheslack-Postava] Clarify and expand the explanation of the 
Copycat Coordinator interface.
c0e5fdc [Ewen Cheslack-Postava] Merge remote-tracking branch 'origin/trunk' 
into copycat
0fa7a36 [Ewen Cheslack-Postava] Mark Copycat classes as unstable and reduce 
visibility of some classes where possible.
d55d31e [Ewen Cheslack-Postava] Reorganize Copycat code to put it all under one 
top-level directory.
b29cb2c [Ewen Cheslack-Postava] Merge remote-tracking branch 'origin/trunk' 
into copycat
d713a21 [Ewen Cheslack-Postava] Address Gwen's review comments.
6787a85 [Ewen Cheslack-Postava] Make Converter generic to match serializers 
since some serialization formats do not require a base class of Object; update 
many other classes to have generic key and value class type parameters to match 
this change.
b194c73 [Ewen Cheslack-Postava] Split Copycat converter option into two options 
for key and value.
0b5a1a0 [Ewen Cheslack-Postava] Normalize naming to use partition for both 
source and Kafka, adjusting naming in CopycatRecord classes to clearly 
differentiate.
e345142 [Ewen Cheslack-Postava] Remove Copycat reflection utils, use existing 
Utils and ConfigDef functionality from clients package.
be5c387 [Ewen Cheslack-Postava] Minor cleanup
122423e [Ewen Cheslack-Postava] Style cleanup
6ba87de [Ewen Cheslack-Postava] Remove most of the Avro-based mock runtime data 
API, only preserving enough schema functionality to support basic primitive 
types for an initial 

[3/7] kafka git commit: KAFKA-2366; Initial patch for Copycat

2015-08-14 Thread gwenshap
http://git-wip-us.apache.org/repos/asf/kafka/blob/f6acfb08/copycat/runtime/src/main/java/org/apache/kafka/copycat/runtime/Herder.java
--
diff --git 
a/copycat/runtime/src/main/java/org/apache/kafka/copycat/runtime/Herder.java 
b/copycat/runtime/src/main/java/org/apache/kafka/copycat/runtime/Herder.java
new file mode 100644
index 000..7f8b7c2
--- /dev/null
+++ b/copycat/runtime/src/main/java/org/apache/kafka/copycat/runtime/Herder.java
@@ -0,0 +1,67 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * <p/>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p/>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ **/
+
+package org.apache.kafka.copycat.runtime;
+
+import org.apache.kafka.copycat.util.Callback;
+
+import java.util.Properties;
+
+/**
+ * <p>
+ * The herder interface tracks and manages workers and connectors. It is the main interface for external components
+ * to make changes to the state of the cluster. For example, in distributed mode, an implementation of this class
+ * knows how to accept a connector configuration, may need to route it to the current leader worker for the cluster so
+ * the config can be written to persistent storage, and then ensures the new connector is correctly instantiated on one
+ * of the workers.
+ * </p>
+ * <p>
+ * This class must implement all the actions that can be taken on the cluster (add/remove connectors, pause/resume tasks,
+ * get state of connectors and tasks, etc). The non-Java interfaces to the cluster (REST API and CLI) are very simple
+ * wrappers of the functionality provided by this interface.
+ * </p>
+ * <p>
+ * In standalone mode, this implementation of this class will be trivial because no coordination is needed. In that case,
+ * the implementation will mainly be delegating tasks directly to other components. For example, when creating a new
+ * connector in standalone mode, there is no need to persist the config and the connector and its tasks must run in the
+ * same process, so the standalone herder implementation can immediately instantiate and start the connector and its
+ * tasks.
+ * </p>
+ */
+public interface Herder {
+
+void start();
+
+void stop();
+
+/**
+ * Submit a connector job to the cluster. This works from any node by 
forwarding the request to
+ * the leader herder if necessary.
+ *
+ * @param connectorProps user-specified properties for this job
+ * @param callback callback to invoke when the request completes
+ */
+void addConnector(Properties connectorProps, Callback<String> callback);
+
+/**
+ * Delete a connector job by name.
+ *
+ * @param name name of the connector job to shut down and delete
+ * @param callback callback to invoke when the request completes
+ */
+void deleteConnector(String name, Callback<Void> callback);
+}
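For readers following the patch, here is a minimal sketch of the kind of standalone herder the javadoc describes: connector configs kept in memory and callbacks completed immediately, since no coordination is needed. This is illustrative only, not code from the patch, and it assumes Callback<V> exposes onCompletion(Throwable error, V result).

    package org.apache.kafka.copycat.runtime;

    import java.util.Map;
    import java.util.Properties;
    import java.util.concurrent.ConcurrentHashMap;

    import org.apache.kafka.copycat.util.Callback;

    // Illustrative only: a trivial standalone herder. Configs live in memory and
    // callbacks complete immediately because there is no cluster to coordinate.
    public class InMemoryStandaloneHerder implements Herder {
        private final Map<String, Properties> connectors = new ConcurrentHashMap<>();

        @Override
        public void start() {
            // Nothing to coordinate in standalone mode.
        }

        @Override
        public void stop() {
            connectors.clear();
        }

        @Override
        public void addConnector(Properties connectorProps, Callback<String> callback) {
            String name = connectorProps.getProperty("name");
            connectors.put(name, connectorProps);
            // A real implementation would instantiate and start the connector here.
            callback.onCompletion(null, name);
        }

        @Override
        public void deleteConnector(String name, Callback<Void> callback) {
            if (connectors.remove(name) == null) {
                callback.onCompletion(new IllegalArgumentException("Unknown connector: " + name), null);
                return;
            }
            callback.onCompletion(null, null);
        }
    }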

http://git-wip-us.apache.org/repos/asf/kafka/blob/f6acfb08/copycat/runtime/src/main/java/org/apache/kafka/copycat/runtime/SinkTaskContextImpl.java
--
diff --git 
a/copycat/runtime/src/main/java/org/apache/kafka/copycat/runtime/SinkTaskContextImpl.java
 
b/copycat/runtime/src/main/java/org/apache/kafka/copycat/runtime/SinkTaskContextImpl.java
new file mode 100644
index 000..f47c984
--- /dev/null
+++ 
b/copycat/runtime/src/main/java/org/apache/kafka/copycat/runtime/SinkTaskContextImpl.java
@@ -0,0 +1,24 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * <p/>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p/>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ **/
+
+package org.apache.kafka.copycat.runtime;
+

[6/7] kafka git commit: KAFKA-2366; Initial patch for Copycat

2015-08-14 Thread gwenshap
http://git-wip-us.apache.org/repos/asf/kafka/blob/f6acfb08/copycat/data/src/main/java/org/apache/kafka/copycat/data/DataRuntimeException.java
--
diff --git 
a/copycat/data/src/main/java/org/apache/kafka/copycat/data/DataRuntimeException.java
 
b/copycat/data/src/main/java/org/apache/kafka/copycat/data/DataRuntimeException.java
new file mode 100644
index 000..855c0fd
--- /dev/null
+++ 
b/copycat/data/src/main/java/org/apache/kafka/copycat/data/DataRuntimeException.java
@@ -0,0 +1,36 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ **/
+
+
+
+package org.apache.kafka.copycat.data;
+
+/** Base Avro exception. */
+public class DataRuntimeException extends RuntimeException {
+public DataRuntimeException(Throwable cause) {
+super(cause);
+}
+
+public DataRuntimeException(String message) {
+super(message);
+}
+
+public DataRuntimeException(String message, Throwable cause) {
+super(message, cause);
+}
+}
+

http://git-wip-us.apache.org/repos/asf/kafka/blob/f6acfb08/copycat/data/src/main/java/org/apache/kafka/copycat/data/DataTypeException.java
--
diff --git 
a/copycat/data/src/main/java/org/apache/kafka/copycat/data/DataTypeException.java
 
b/copycat/data/src/main/java/org/apache/kafka/copycat/data/DataTypeException.java
new file mode 100644
index 000..6a74d88
--- /dev/null
+++ 
b/copycat/data/src/main/java/org/apache/kafka/copycat/data/DataTypeException.java
@@ -0,0 +1,33 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ **/
+
+
+
+package org.apache.kafka.copycat.data;
+
+
+/** Thrown when an illegal type is used. */
+public class DataTypeException extends DataRuntimeException {
+public DataTypeException(String message) {
+super(message);
+}
+
+public DataTypeException(String message, Throwable cause) {
+super(message, cause);
+}
+}
+

http://git-wip-us.apache.org/repos/asf/kafka/blob/f6acfb08/copycat/data/src/main/java/org/apache/kafka/copycat/data/ObjectProperties.java
--
diff --git 
a/copycat/data/src/main/java/org/apache/kafka/copycat/data/ObjectProperties.java
 
b/copycat/data/src/main/java/org/apache/kafka/copycat/data/ObjectProperties.java
new file mode 100644
index 000..e995b7f
--- /dev/null
+++ 
b/copycat/data/src/main/java/org/apache/kafka/copycat/data/ObjectProperties.java
@@ -0,0 +1,85 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ **/
+
+
+
+package org.apache.kafka.copycat.data;
+
+import java.util.LinkedHashMap;
+import java.util.Map;
+import 

[2/7] kafka git commit: KAFKA-2366; Initial patch for Copycat

2015-08-14 Thread gwenshap
http://git-wip-us.apache.org/repos/asf/kafka/blob/f6acfb08/copycat/runtime/src/main/java/org/apache/kafka/copycat/storage/OffsetStorageWriter.java
--
diff --git 
a/copycat/runtime/src/main/java/org/apache/kafka/copycat/storage/OffsetStorageWriter.java
 
b/copycat/runtime/src/main/java/org/apache/kafka/copycat/storage/OffsetStorageWriter.java
new file mode 100644
index 000..c6e829c
--- /dev/null
+++ 
b/copycat/runtime/src/main/java/org/apache/kafka/copycat/storage/OffsetStorageWriter.java
@@ -0,0 +1,208 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * <p/>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p/>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ **/
+
+package org.apache.kafka.copycat.storage;
+
+import org.apache.kafka.common.serialization.Serializer;
+import org.apache.kafka.copycat.errors.CopycatException;
+import org.apache.kafka.copycat.util.Callback;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.nio.ByteBuffer;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.concurrent.Future;
+
+/**
+ * <p>
+ * OffsetStorageWriter is a buffered writer that wraps the simple OffsetBackingStore interface.
+ * It maintains a copy of the key-value data in memory and buffers writes. It allows you to take
+ * a snapshot, which can then be asynchronously flushed to the backing store while new writes
+ * continue to be processed. This allows Copycat to process offset commits in the background
+ * while continuing to process messages.
+ * </p>
+ * <p>
+ * Copycat uses an OffsetStorage implementation to save state about the current progress of
+ * source (import to Kafka) jobs, which may have many input partitions and offsets may not be as
+ * simple as they are for Kafka partitions or files. Offset storage is not required for sink jobs
+ * because they can use Kafka's native offset storage (or the sink data store can handle offset
+ * storage to achieve exactly once semantics).
+ * </p>
+ * <p>
+ * Both partitions and offsets are generic data objects. This allows different connectors to use
+ * whatever representation they need, even arbitrarily complex records. These are translated
+ * internally into the serialized form the OffsetBackingStore uses.
+ * </p>
+ * <p>
+ * Note that this only provides write functionality. This is intentional to ensure stale data is
+ * never read. Offset data should only be read during startup or reconfiguration of a task. By
+ * always serving those requests by reading the values from the backing store, we ensure we never
+ * accidentally use stale data. (One example of how this can occur: a task is processing input
+ * partition A, writing offsets; reconfiguration causes partition A to be reassigned elsewhere;
+ * reconfiguration causes partition A to be reassigned to this node, but now the offset data is out
+ * of date). Since these offsets are created and managed by the connector itself, there's no way
+ * for the offset management layer to know which keys are owned by which tasks at any given
+ * time.
+ * </p>
+ * <p>
+ * This class is not thread-safe. It should only be accessed from a Task's processing thread.
+ * </p>
+ */
+public class OffsetStorageWriter<K, V> {
+private static final Logger log = LoggerFactory.getLogger(OffsetStorageWriter.class);
+
+private final OffsetBackingStore backingStore;
+private final Converter<K> keyConverter;
+private final Converter<V> valueConverter;
+private final Serializer<K> keySerializer;
+private final Serializer<V> valueSerializer;
+private final String namespace;
+// Offset data in Copycat format
+private Map<Object, Object> data = new HashMap<>();
+
+// Not synchronized, should only be accessed by flush thread
+private Map<Object, Object> toFlush = null;
+// Unique ID for each flush request to handle callbacks after timeouts
+private long currentFlushId = 0;
+
+public OffsetStorageWriter(OffsetBackingStore backingStore,
+   String namespace, Converter<K> keyConverter, Converter<V> valueConverter,
+   Serializer<K> keySerializer, Serializer<V> valueSerializer) {
+this.backingStore = backingStore;
+this.namespace = namespace;
+this.keyConverter = 
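A small self-contained sketch of the snapshot-and-flush scheme the javadoc above describes (illustrative names, not the patch's API): writes accumulate in one map, beginFlush() swaps it out as a snapshot so new writes can continue, and a flush id guards against completing a stale flush after a timeout.

    import java.util.HashMap;
    import java.util.Map;

    // Illustrative sketch of the double buffering described above.
    class BufferedOffsetWriter {
        private Map<Object, Object> data = new HashMap<>();  // pending writes
        private Map<Object, Object> toFlush = null;          // in-flight snapshot
        private long currentFlushId = 0;

        void offset(Object partition, Object offset) {
            data.put(partition, offset);
        }

        // Snapshot the pending writes; returns the id the completion must present.
        long beginFlush() {
            if (toFlush != null)
                throw new IllegalStateException("A flush is already in progress");
            toFlush = data;
            data = new HashMap<>();
            return currentFlushId;
        }

        // Called when the asynchronous write to the backing store succeeds.
        boolean completeFlush(long flushId) {
            if (flushId != currentFlushId)
                return false; // stale completion after a timeout; ignore it
            toFlush = null;
            currentFlushId++;
            return true;
        }

        // On failure, fold the snapshot back in so the offsets are retried later;
        // writes made while the flush was in flight win over the snapshot.
        void cancelFlush() {
            toFlush.putAll(data);
            data = toFlush;
            toFlush = null;
            currentFlushId++;
        }
    }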

[5/7] kafka git commit: KAFKA-2366; Initial patch for Copycat

2015-08-14 Thread gwenshap
http://git-wip-us.apache.org/repos/asf/kafka/blob/f6acfb08/copycat/data/src/main/java/org/apache/kafka/copycat/data/SchemaBuilder.java
--
diff --git 
a/copycat/data/src/main/java/org/apache/kafka/copycat/data/SchemaBuilder.java 
b/copycat/data/src/main/java/org/apache/kafka/copycat/data/SchemaBuilder.java
new file mode 100644
index 000..f4a76a1
--- /dev/null
+++ 
b/copycat/data/src/main/java/org/apache/kafka/copycat/data/SchemaBuilder.java
@@ -0,0 +1,2415 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ **/
+
+
+
+package org.apache.kafka.copycat.data;
+
+import org.apache.kafka.copycat.data.Schema.Field;
+
+import java.nio.ByteBuffer;
+import java.util.*;
+
+/**
+ * <p>
+ * A fluent interface for building {@link Schema} instances. The flow of the API
+ * is designed to mimic the <a
+ * href="http://avro.apache.org/docs/current/spec.html#schemas">Avro Schema
+ * Specification</a>
+ * </p>
+ * For example, the below JSON schema and the fluent builder code to create it
+ * are very similar:
+ *
+ * <pre>
+ * {
+ *   "type": "record",
+ *   "name": "HandshakeRequest", "namespace":"org.apache.avro.ipc",
+ *   "fields": [
+ * {"name": "clientHash",
+ *  "type": {"type": "fixed", "name": "MD5", "size": 16}},
+ * {"name": "clientProtocol", "type": ["null", "string"]},
+ * {"name": "serverHash", "type": "MD5"},
+ * {"name": "meta", "type": ["null", {"type": "map", "values": "bytes"}]}
+ *   ]
+ * }
+ * </pre>
+ *
+ * <pre>
+ *   Schema schema = SchemaBuilder
+ *   .record("HandshakeRequest").namespace("org.apache.avro.ipc")
+ *   .fields()
+ * .name("clientHash").type().fixed("MD5").size(16).noDefault()
+ * .name("clientProtocol").type().nullable().stringType().noDefault()
+ * .name("serverHash").type("MD5")
+ * .name("meta").type().nullable().map().values().bytesType().noDefault()
+ *   .endRecord();
+ * </pre>
+ * <p/>
+ *
+ * <h5>Usage Guide</h5>
+ * SchemaBuilder chains together many smaller builders and maintains nested
+ * context in order to mimic the Avro Schema specification. Every Avro type in
+ * JSON has required and optional JSON properties, as well as user-defined
+ * properties.
+ * <p/>
+ * <h6>Selecting and Building an Avro Type</h6>
+ * The API analogy for the right hand side of the Avro Schema JSON
+ * <pre>
+ * "type":
+ * </pre>
+ * is a {@link TypeBuilder}, {@link FieldTypeBuilder}, or
+ * {@link UnionFieldTypeBuilder}, depending on the context. These types all
+ * share a similar API for selecting and building types.
+ * <p/>
+ * <h5>Primitive Types</h5>
+ * All Avro primitive types are trivial to configure. A primitive type in
+ * Avro JSON can be declared two ways, one that supports custom properties
+ * and one that does not:
+ * <pre>
+ * {"type":"int"}
+ * {"type":{"name":"int"}}
+ * {"type":{"name":"int", "customProp":"val"}}
+ * </pre>
+ * The analogous code form for the above three JSON lines is the below
+ * three lines:
+ * <pre>
+ *  .intType()
+ *  .intBuilder().endInt()
+ *  .intBuilder().prop("customProp", "val").endInt()
+ * </pre>
+ * Every primitive type has a shortcut to create the trivial type, and
+ * a builder when custom properties are required.  The first line above is
+ * a shortcut for the second, analogous to the JSON case.
+ * <h6>Named Types</h6>
+ * Avro named types have names, namespace, aliases, and doc.  In this API
+ * these share a common parent, {@link NamespacedBuilder}.
+ * The builders for named types require a name to be constructed, and optional
+ * configuration via:
+ * <li>{@link NamespacedBuilder#doc()}</li>
+ * <li>{@link NamespacedBuilder#namespace(String)}</li>
+ * <li>{@link NamespacedBuilder#aliases(String...)}</li>
+ * <li>{@link PropBuilder#prop(String, Object)}</li>
+ * <p/>
+ * Each named type completes configuration of the optional properties
+ * with its own method:
+ * <li>{@link FixedBuilder#size(int)}</li>
+ * <li>{@link EnumBuilder#symbols(String...)}</li>
+ * <li>{@link RecordBuilder#fields()}</li>
+ * Example use of a named type with all optional parameters:
+ * <pre>
+ * .enumeration("Suit").namespace("org.apache.test")
+ *   .aliases("org.apache.test.OldSuit")
+ *   .doc("CardSuits")
+ *   .prop("customProp", "val")
+ *   .symbols("SPADES", "HEARTS", "DIAMONDS", "CLUBS")
+ * </pre>
+ * Which is equivalent to the JSON:
+ * <pre>
+ * { "type":"enum",
+ *   "name":"Suit",

svn commit: r1696013 - /kafka/site/083/configuration.html

2015-08-14 Thread gwenshap
Author: gwenshap
Date: Sat Aug 15 02:18:00 2015
New Revision: 1696013

URL: http://svn.apache.org/r1696013
Log:
KAFKA-2433; Remove documentation on dead configuration item: 
replica.lag.max.messages; by Stephen Powis

Modified:
kafka/site/083/configuration.html

Modified: kafka/site/083/configuration.html
URL: 
http://svn.apache.org/viewvc/kafka/site/083/configuration.html?rev=1696013&r1=1696012&r2=1696013&view=diff
==
--- kafka/site/083/configuration.html (original)
+++ kafka/site/083/configuration.html Sat Aug 15 02:18:00 2015
@@ -247,11 +247,6 @@ ZooKeeper also allows you to add a chro
   <td>If a follower hasn't sent any fetch requests for this window of time, the leader will remove the follower from ISR (in-sync replicas) and treat it as dead.</td>
 </tr>
 <tr>
-  <td>replica.lag.max.messages</td>
-  <td>4000</td>
-  <td>If a replica falls more than this many messages behind the leader, the leader will remove the follower from ISR and treat it as dead.</td>
-</tr>
-<tr>
   <td>replica.socket.timeout.ms</td>
   <td>30 * 1000</td>
   <td>The socket timeout for network requests to the leader for replicating data.</td>




kafka git commit: KAFKA-2288; Follow-up to KAFKA-2249 - reduce logging and testing; Reviewed by Jun Rao

2015-08-04 Thread gwenshap
Repository: kafka
Updated Branches:
  refs/heads/trunk 7a666f7aa -> 9cefb2a0f


KAFKA-2288; Follow-up to KAFKA-2249 - reduce logging and testing; Reviewed by 
Jun Rao


Project: http://git-wip-us.apache.org/repos/asf/kafka/repo
Commit: http://git-wip-us.apache.org/repos/asf/kafka/commit/9cefb2a0
Tree: http://git-wip-us.apache.org/repos/asf/kafka/tree/9cefb2a0
Diff: http://git-wip-us.apache.org/repos/asf/kafka/diff/9cefb2a0

Branch: refs/heads/trunk
Commit: 9cefb2a0fb7852d35cfe0f051bc6eadb8e9c4c80
Parents: 7a666f7
Author: Gwen Shapira csh...@gmail.com
Authored: Tue Aug 4 19:04:58 2015 -0700
Committer: Gwen Shapira csh...@gmail.com
Committed: Tue Aug 4 19:04:58 2015 -0700

--
 .../kafka/common/config/AbstractConfig.java |  23 +-
 core/src/main/scala/kafka/log/LogConfig.scala   |   2 +-
 .../scala/unit/kafka/log/LogConfigTest.scala|  22 -
 .../kafka/server/KafkaConfigConfigDefTest.scala | 403 ---
 .../unit/kafka/server/KafkaConfigTest.scala | 154 ++-
 5 files changed, 175 insertions(+), 429 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/kafka/blob/9cefb2a0/clients/src/main/java/org/apache/kafka/common/config/AbstractConfig.java
--
diff --git 
a/clients/src/main/java/org/apache/kafka/common/config/AbstractConfig.java 
b/clients/src/main/java/org/apache/kafka/common/config/AbstractConfig.java
index ec3ae15..6c31748 100644
--- a/clients/src/main/java/org/apache/kafka/common/config/AbstractConfig.java
+++ b/clients/src/main/java/org/apache/kafka/common/config/AbstractConfig.java
@@ -39,7 +39,7 @@ public class AbstractConfig {
 private final Map<String, Object> values;
 
 @SuppressWarnings("unchecked")
-public AbstractConfig(ConfigDef definition, Map<?, ?> originals) {
+public AbstractConfig(ConfigDef definition, Map<?, ?> originals, Boolean doLog) {
 /* check that all the keys are really strings */
 for (Object key : originals.keySet())
 if (!(key instanceof String))
@@ -47,7 +47,12 @@ public class AbstractConfig {
 this.originals = (Map<String, ?>) originals;
 this.values = definition.parse(this.originals);
 this.used = Collections.synchronizedSet(new HashSet<String>());
-logAll();
+if (doLog)
+logAll();
+}
+
+public AbstractConfig(ConfigDef definition, Map<?, ?> originals) {
+this(definition, originals, true);
 }
 
 protected Object get(String key) {
@@ -167,4 +172,18 @@ public class AbstractConfig {
 return objects;
 }
 
+@Override
+public boolean equals(Object o) {
+if (this == o) return true;
+if (o == null || getClass() != o.getClass()) return false;
+
+AbstractConfig that = (AbstractConfig) o;
+
+return originals.equals(that.originals);
+}
+
+@Override
+public int hashCode() {
+return originals.hashCode();
+}
 }
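To illustrate the new flag, a hypothetical subclass (not from the patch) that opts out of the per-instantiation logging while still reusing the ConfigDef parsing, the same way LogConfig does below:

    import java.util.Map;

    import org.apache.kafka.common.config.AbstractConfig;
    import org.apache.kafka.common.config.ConfigDef;

    // Hypothetical subclass showing the doLog flag; "QuietConfig" and its
    // single key are made up for the example.
    public class QuietConfig extends AbstractConfig {
        private static final ConfigDef CONFIG = new ConfigDef()
            .define("segment.bytes", ConfigDef.Type.INT, 1024 * 1024,
                    ConfigDef.Importance.HIGH, "The segment file size.");

        public QuietConfig(Map<?, ?> props) {
            super(CONFIG, props, false); // false: skip logAll() on construction
        }
    }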

http://git-wip-us.apache.org/repos/asf/kafka/blob/9cefb2a0/core/src/main/scala/kafka/log/LogConfig.scala
--
diff --git a/core/src/main/scala/kafka/log/LogConfig.scala 
b/core/src/main/scala/kafka/log/LogConfig.scala
index fc41132..c969d16 100755
--- a/core/src/main/scala/kafka/log/LogConfig.scala
+++ b/core/src/main/scala/kafka/log/LogConfig.scala
@@ -46,7 +46,7 @@ object Defaults {
   val PreAllocateEnable = kafka.server.Defaults.LogPreAllocateEnable
 }
 
-case class LogConfig(props: java.util.Map[_, _]) extends 
AbstractConfig(LogConfig.configDef, props) {
+case class LogConfig(props: java.util.Map[_, _]) extends 
AbstractConfig(LogConfig.configDef, props, false) {
 
   val segmentSize = getInt(LogConfig.SegmentBytesProp)
   val segmentMs = getLong(LogConfig.SegmentMsProp)

http://git-wip-us.apache.org/repos/asf/kafka/blob/9cefb2a0/core/src/test/scala/unit/kafka/log/LogConfigTest.scala
--
diff --git a/core/src/test/scala/unit/kafka/log/LogConfigTest.scala 
b/core/src/test/scala/unit/kafka/log/LogConfigTest.scala
index 19dcb47..72e98b3 100644
--- a/core/src/test/scala/unit/kafka/log/LogConfigTest.scala
+++ b/core/src/test/scala/unit/kafka/log/LogConfigTest.scala
@@ -33,28 +33,6 @@ class LogConfigTest extends JUnit3Suite {
   }
 
   @Test
-  def testFromPropsToProps() {
-import scala.util.Random._
-val expected = new Properties()
-LogConfig.configNames().foreach((name) => {
-  name match {
-case LogConfig.UncleanLeaderElectionEnableProp => expected.setProperty(name, randFrom("true", "false"))
-case LogConfig.CompressionTypeProp => expected.setProperty(name, randFrom("producer", "uncompressed", "gzip"))
-case LogConfig.CleanupPolicyProp => expected.setProperty(name, randFrom(LogConfig.Compact, LogConfig.Delete))
-

kafka git commit: KAFKA-2344; kafka-merge-pr improvements

2015-07-23 Thread gwenshap
Repository: kafka
Updated Branches:
  refs/heads/trunk fd3b4cc41 -> 18adec7ed


KAFKA-2344; kafka-merge-pr improvements

The first 4 commits are adapted from changes that have been done to the Spark 
version and the last one is the feature that gwenshap asked for.

Author: Ismael Juma ism...@juma.me.uk

Reviewers: Gwen Shapira csh...@gmail.com

Closes #90 from ijuma/kafka-2344-merge-pr-improvements and squashes the 
following commits:

900c371 [Ismael Juma] Allow reviewers to be entered during merge
ac06347 [Ismael Juma] Allow primary author to be overridden during merge
b309829 [Ismael Juma] Set JIRA resolution to Fixed instead of relying on 
default transition
0c69a64 [Ismael Juma] Check return value of doctest.testmod()
061cdce [Ismael Juma] Fix instructions on how to install the `jira-python` 
library


Project: http://git-wip-us.apache.org/repos/asf/kafka/repo
Commit: http://git-wip-us.apache.org/repos/asf/kafka/commit/18adec7e
Tree: http://git-wip-us.apache.org/repos/asf/kafka/tree/18adec7e
Diff: http://git-wip-us.apache.org/repos/asf/kafka/diff/18adec7e

Branch: refs/heads/trunk
Commit: 18adec7ed5ad71eb73171aff0e0c82d627cfa5d5
Parents: fd3b4cc
Author: Ismael Juma ism...@juma.me.uk
Authored: Thu Jul 23 08:46:47 2015 -0700
Committer: Gwen Shapira csh...@gmail.com
Committed: Thu Jul 23 08:46:47 2015 -0700

--
 kafka-merge-pr.py | 24 
 1 file changed, 20 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/kafka/blob/18adec7e/kafka-merge-pr.py
--
diff --git a/kafka-merge-pr.py b/kafka-merge-pr.py
index c6ef3df..876f530 100644
--- a/kafka-merge-pr.py
+++ b/kafka-merge-pr.py
@@ -130,7 +130,15 @@ def merge_pr(pr_num, target_ref, title, body, 
pr_repo_desc):
 '--pretty=format:%an %ae']).split("\n")
 distinct_authors = sorted(set(commit_authors),
   key=lambda x: commit_authors.count(x), 
reverse=True)
-primary_author = distinct_authors[0]
+primary_author = raw_input(
+"Enter primary author in the format of \"name <email>\" [%s]: " %
+distinct_authors[0])
+if primary_author == "":
+primary_author = distinct_authors[0]
+
+reviewers = raw_input(
+"Enter reviewers in the format of \"name1 <email1>, name2 <email2>\": ").strip()
+
 commits = run_cmd(['git', 'log', 'HEAD..%s' % pr_branch_name,
  '--pretty=format:%h [%an] %s']).split("\n\n")
 
@@ -146,6 +154,9 @@ def merge_pr(pr_num, target_ref, title, body, pr_repo_desc):
 
 merge_message_flags += ["-m", authors]
 
+if (reviewers != ""):
+merge_message_flags += ["-m", "Reviewers: %s" % reviewers]
+
 if had_conflicts:
 committer_name = run_cmd("git config --get user.name").strip()
 committer_email = run_cmd("git config --get user.email").strip()
@@ -278,7 +289,10 @@ def resolve_jira_issue(merge_branches, comment, 
default_jira_id=""):
 jira_fix_versions = map(lambda v: get_version_json(v), fix_versions)
 
 resolve = filter(lambda a: a['name'] == "Resolve Issue", asf_jira.transitions(jira_id))[0]
-asf_jira.transition_issue(jira_id, resolve["id"], fixVersions=jira_fix_versions, comment=comment)
+resolution = filter(lambda r: r.raw['name'] == "Fixed", asf_jira.resolutions())[0]
+asf_jira.transition_issue(
+jira_id, resolve["id"], fixVersions = jira_fix_versions,
+comment = comment, resolution = {'id': resolution.raw['id']})
 
 print "Successfully resolved %s with fixVersions=%s!" % (jira_id, fix_versions)
 
@@ -435,11 +449,13 @@ def main():
 print "JIRA_USERNAME and JIRA_PASSWORD not set"
 print "Exiting without trying to close the associated JIRA."
 else:
-print "Could not find jira-python library. Run 'sudo pip install jira-python' to install."
+print "Could not find jira-python library. Run 'sudo pip install jira' to install."
 print "Exiting without trying to close the associated JIRA."
 
 if __name__ == "__main__":
 import doctest
-doctest.testmod()
+(failure_count, test_count) = doctest.testmod()
+if (failure_count):
+exit(-1)
 
 main()



kafka git commit: KAFKA-2355; Add a unit test to validate the deletion of a partition marked as deleted; patched by Edward Ribeiro, reviewed by Ashish Singh, Ismael Juma and Grant Henke

2015-07-23 Thread gwenshap
Repository: kafka
Updated Branches:
  refs/heads/trunk 18adec7ed -> 66c8647d8


KAFKA-2355;  Add a unit test to validate the deletion of a partition marked as 
deleted; patched by Edward Ribeiro, reviewed by Ashish Singh, Ismael Juma and 
Grant Henke


Project: http://git-wip-us.apache.org/repos/asf/kafka/repo
Commit: http://git-wip-us.apache.org/repos/asf/kafka/commit/66c8647d
Tree: http://git-wip-us.apache.org/repos/asf/kafka/tree/66c8647d
Diff: http://git-wip-us.apache.org/repos/asf/kafka/diff/66c8647d

Branch: refs/heads/trunk
Commit: 66c8647d85f801893b9f882a7ad49242cef683fd
Parents: 18adec7
Author: Edward Ribeiro edward.ribe...@gmail.com
Authored: Thu Jul 23 09:17:19 2015 -0700
Committer: Gwen Shapira csh...@gmail.com
Committed: Thu Jul 23 09:17:19 2015 -0700

--
 .../unit/kafka/admin/DeleteTopicTest.scala  | 23 +++-
 1 file changed, 22 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/kafka/blob/66c8647d/core/src/test/scala/unit/kafka/admin/DeleteTopicTest.scala
--
diff --git a/core/src/test/scala/unit/kafka/admin/DeleteTopicTest.scala 
b/core/src/test/scala/unit/kafka/admin/DeleteTopicTest.scala
index fa8ce25..c38df80 100644
--- a/core/src/test/scala/unit/kafka/admin/DeleteTopicTest.scala
+++ b/core/src/test/scala/unit/kafka/admin/DeleteTopicTest.scala
@@ -24,7 +24,7 @@ import kafka.utils.{ZkUtils, TestUtils}
 import kafka.server.{KafkaServer, KafkaConfig}
 import org.junit.Test
 import java.util.Properties
-import kafka.common.TopicAndPartition
+import kafka.common.{TopicAlreadyMarkedForDeletionException, TopicAndPartition}
 
 class DeleteTopicTest extends JUnit3Suite with ZooKeeperTestHarness {
 
@@ -249,6 +249,27 @@ class DeleteTopicTest extends JUnit3Suite with 
ZooKeeperTestHarness {
 servers.foreach(_.shutdown())
   }
 
+  @Test
+  def testDeleteTopicAlreadyMarkedAsDeleted() {
+val topicAndPartition = TopicAndPartition("test", 0)
+val topic = topicAndPartition.topic
+val servers = createTestTopicAndCluster(topic)
+
+try {
+  // start topic deletion
+  AdminUtils.deleteTopic(zkClient, topic)
+  // try to delete topic marked as deleted
+  AdminUtils.deleteTopic(zkClient, topic)
+  fail("Expected TopicAlreadyMarkedForDeletionException")
+}
+catch {
+  case e: TopicAlreadyMarkedForDeletionException => // expected exception
+}
+
+TestUtils.verifyTopicDeletion(zkClient, topic, 1, servers)
+servers.foreach(_.shutdown())
+  }
+
   private def createTestTopicAndCluster(topic: String): Seq[KafkaServer] = {
 
 val brokerConfigs = TestUtils.createBrokerConfigs(3, zkConnect, false)



kafka git commit: KAFKA-2198: kafka-topics.sh exits with 0 status on failures; patched by Manikumar Reddy reviewed by Gwen Shapira

2015-07-13 Thread gwenshap
Repository: kafka
Updated Branches:
  refs/heads/trunk bdbb9672f -> a7e0ac365


KAFKA-2198: kafka-topics.sh exits with 0 status on failures; patched by 
Manikumar Reddy reviewed by Gwen Shapira


Project: http://git-wip-us.apache.org/repos/asf/kafka/repo
Commit: http://git-wip-us.apache.org/repos/asf/kafka/commit/a7e0ac36
Tree: http://git-wip-us.apache.org/repos/asf/kafka/tree/a7e0ac36
Diff: http://git-wip-us.apache.org/repos/asf/kafka/diff/a7e0ac36

Branch: refs/heads/trunk
Commit: a7e0ac3659c2b499124a866bc0b16b6b1b412376
Parents: bdbb967
Author: Manikumar Reddy manikumar.re...@gmail.com
Authored: Mon Jul 13 22:08:33 2015 -0700
Committer: Gwen Shapira csh...@gmail.com
Committed: Mon Jul 13 22:08:33 2015 -0700

--
 core/src/main/scala/kafka/admin/TopicCommand.scala | 17 +++--
 1 file changed, 11 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/kafka/blob/a7e0ac36/core/src/main/scala/kafka/admin/TopicCommand.scala
--
diff --git a/core/src/main/scala/kafka/admin/TopicCommand.scala 
b/core/src/main/scala/kafka/admin/TopicCommand.scala
index a2ecb96..a90aa87 100755
--- a/core/src/main/scala/kafka/admin/TopicCommand.scala
+++ b/core/src/main/scala/kafka/admin/TopicCommand.scala
@@ -31,7 +31,7 @@ import org.apache.kafka.common.utils.Utils
 import kafka.coordinator.ConsumerCoordinator
 
 
-object TopicCommand {
+object TopicCommand extends Logging {
 
   def main(args: Array[String]): Unit = {
 
@@ -48,7 +48,7 @@ object TopicCommand {
 opts.checkArgs()
 
 val zkClient = 
ZkUtils.createZkClient(opts.options.valueOf(opts.zkConnectOpt), 3, 3)
-
+var exitCode = 0
 try {
   if(opts.options.has(opts.createOpt))
 createTopic(zkClient, opts)
@@ -62,11 +62,14 @@ object TopicCommand {
 deleteTopic(zkClient, opts)
 } catch {
   case e: Throwable =>
-println("Error while executing topic command " + e.getMessage)
-println(Utils.stackTrace(e))
+println("Error while executing topic command : " + e.getMessage)
+error(Utils.stackTrace(e))
+exitCode = 1
 } finally {
   zkClient.close()
+  System.exit(exitCode)
 }
+
   }
 
   private def getTopics(zkClient: ZkClient, opts: TopicCommandOptions): 
Seq[String] = {
@@ -97,7 +100,8 @@ object TopicCommand {
   def alterTopic(zkClient: ZkClient, opts: TopicCommandOptions) {
 val topics = getTopics(zkClient, opts)
 if (topics.length == 0) {
-  println("Topic %s does not exist".format(opts.options.valueOf(opts.topicOpt)))
+  throw new IllegalArgumentException("Topic %s does not exist on ZK path %s".format(opts.options.valueOf(opts.topicOpt),
+  opts.options.valueOf(opts.zkConnectOpt)))
 }
 topics.foreach { topic =>
   val configs = AdminUtils.fetchTopicConfig(zkClient, topic)
@@ -138,7 +142,8 @@ object TopicCommand {
   def deleteTopic(zkClient: ZkClient, opts: TopicCommandOptions) {
 val topics = getTopics(zkClient, opts)
 if (topics.length == 0) {
-  println("Topic %s does not exist".format(opts.options.valueOf(opts.topicOpt)))
+  throw new IllegalArgumentException("Topic %s does not exist on ZK path %s".format(opts.options.valueOf(opts.topicOpt),
+  opts.options.valueOf(opts.zkConnectOpt)))
 }
 topics.foreach { topic =>
   try {



kafka git commit: KAFKA-2345; Attempt to delete a topic already marked for deletion throws ZkNodeExistsException; patched by Ashish Singh; reviewed by Sriharsha Chintalapani and Ismael Juma

2015-07-17 Thread gwenshap
Repository: kafka
Updated Branches:
  refs/heads/trunk 15cba9f00 -> a5b11886d


KAFKA-2345;  Attempt to delete a topic already marked for deletion throws 
ZkNodeExistsException; patched by Ashish Singh; reviewed by Sriharsha 
Chintalapani and Ismael Juma


Project: http://git-wip-us.apache.org/repos/asf/kafka/repo
Commit: http://git-wip-us.apache.org/repos/asf/kafka/commit/a5b11886
Tree: http://git-wip-us.apache.org/repos/asf/kafka/tree/a5b11886
Diff: http://git-wip-us.apache.org/repos/asf/kafka/diff/a5b11886

Branch: refs/heads/trunk
Commit: a5b11886df8c7aad0548efd2c7c3dbc579232f03
Parents: 15cba9f
Author: Ashish Singh asi...@cloudera.com
Authored: Fri Jul 17 10:32:43 2015 -0700
Committer: Gwen Shapira csh...@gmail.com
Committed: Fri Jul 17 10:32:43 2015 -0700

--
 core/src/main/scala/kafka/admin/AdminUtils.scala | 8 +++-
 1 file changed, 7 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/kafka/blob/a5b11886/core/src/main/scala/kafka/admin/AdminUtils.scala
--
diff --git a/core/src/main/scala/kafka/admin/AdminUtils.scala 
b/core/src/main/scala/kafka/admin/AdminUtils.scala
index f06edf4..2b4e028 100644
--- a/core/src/main/scala/kafka/admin/AdminUtils.scala
+++ b/core/src/main/scala/kafka/admin/AdminUtils.scala
@@ -163,7 +163,13 @@ object AdminUtils extends Logging {
   }
   
   def deleteTopic(zkClient: ZkClient, topic: String) {
-ZkUtils.createPersistentPath(zkClient, ZkUtils.getDeleteTopicPath(topic))
+try {
+  ZkUtils.createPersistentPath(zkClient, ZkUtils.getDeleteTopicPath(topic))
+} catch {
+  case e1: ZkNodeExistsException => throw new TopicAlreadyMarkedForDeletionException(
+"topic %s is already marked for deletion".format(topic))
+  case e2: Throwable => throw new AdminOperationException(e2.toString)
+}
   }
   
   def isConsumerGroupActive(zkClient: ZkClient, group: String) = {



kafka git commit: KAFKA-2337; Verify that metric names will not collide when creating new topics; patched by Grant Henke; reviewed by Edward Ribeiro, Ashish Singh and Gwen Shapira

2015-07-20 Thread gwenshap
Repository: kafka
Updated Branches:
  refs/heads/trunk 846362724 -> 1d2bd6284


KAFKA-2337;  Verify that metric names will not collide when creating new 
topics; patched by Grant Henke; reviewed by Edward Ribeiro, Ashish Singh and 
Gwen Shapira


Project: http://git-wip-us.apache.org/repos/asf/kafka/repo
Commit: http://git-wip-us.apache.org/repos/asf/kafka/commit/1d2bd628
Tree: http://git-wip-us.apache.org/repos/asf/kafka/tree/1d2bd628
Diff: http://git-wip-us.apache.org/repos/asf/kafka/diff/1d2bd628

Branch: refs/heads/trunk
Commit: 1d2bd6284b06b579c901e6be8919a8a27dbe11ee
Parents: 8463627
Author: Grant Henke granthe...@gmail.com
Authored: Mon Jul 20 16:15:42 2015 -0700
Committer: Gwen Shapira csh...@gmail.com
Committed: Mon Jul 20 16:15:42 2015 -0700

--
 .../src/main/scala/kafka/admin/AdminUtils.scala | 17 ++--
 .../main/scala/kafka/admin/TopicCommand.scala   |  4 +-
 core/src/main/scala/kafka/common/Topic.scala| 22 +++
 .../test/scala/unit/kafka/admin/AdminTest.scala | 16 +++-
 .../scala/unit/kafka/common/TopicTest.scala | 41 
 5 files changed, 95 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/kafka/blob/1d2bd628/core/src/main/scala/kafka/admin/AdminUtils.scala
--
diff --git a/core/src/main/scala/kafka/admin/AdminUtils.scala 
b/core/src/main/scala/kafka/admin/AdminUtils.scala
index 2b4e028..4cc2376 100644
--- a/core/src/main/scala/kafka/admin/AdminUtils.scala
+++ b/core/src/main/scala/kafka/admin/AdminUtils.scala
@@ -250,8 +250,19 @@ object AdminUtils extends Logging {
 require(partitionReplicaAssignment.values.map(_.size).toSet.size == 1, "All partitions should have the same number of replicas.")
 
 val topicPath = ZkUtils.getTopicPath(topic)
-if(!update && zkClient.exists(topicPath))
-  throw new TopicExistsException("Topic \"%s\" already exists.".format(topic))
+
+if (!update) {
+  if (zkClient.exists(topicPath))
+throw new TopicExistsException("Topic \"%s\" already exists.".format(topic))
+  else if (Topic.hasCollisionChars(topic)) {
+val allTopics = ZkUtils.getAllTopics(zkClient)
+val collidingTopics = allTopics.filter(t => Topic.hasCollision(topic, t))
+if (collidingTopics.nonEmpty) {
+  throw new InvalidTopicException("Topic \"%s\" collides with existing topics: %s".format(topic, collidingTopics.mkString(", ")))
+}
+  }
+}
+
 partitionReplicaAssignment.values.foreach(reps => require(reps.size == reps.toSet.size, "Duplicate replica assignment found: " + partitionReplicaAssignment))
 
 // write out the config if there is any, this isn't transactional with the 
partition assignments
@@ -260,7 +271,7 @@ object AdminUtils extends Logging {
 // create the partition assignment
 writeTopicPartitionAssignment(zkClient, topic, partitionReplicaAssignment, 
update)
   }
-  
+
   private def writeTopicPartitionAssignment(zkClient: ZkClient, topic: String, 
replicaAssignment: Map[Int, Seq[Int]], update: Boolean) {
 try {
   val zkPath = ZkUtils.getTopicPath(topic)

http://git-wip-us.apache.org/repos/asf/kafka/blob/1d2bd628/core/src/main/scala/kafka/admin/TopicCommand.scala
--
diff --git a/core/src/main/scala/kafka/admin/TopicCommand.scala 
b/core/src/main/scala/kafka/admin/TopicCommand.scala
index a90aa87..4e28bf1 100755
--- a/core/src/main/scala/kafka/admin/TopicCommand.scala
+++ b/core/src/main/scala/kafka/admin/TopicCommand.scala
@@ -85,9 +85,11 @@ object TopicCommand extends Logging {
   def createTopic(zkClient: ZkClient, opts: TopicCommandOptions) {
 val topic = opts.options.valueOf(opts.topicOpt)
 val configs = parseTopicConfigsToBeAdded(opts)
+if (Topic.hasCollisionChars(topic))
+  println("WARNING: Due to limitations in metric names, topics with a period ('.') or underscore ('_') could collide. To avoid issues it is best to use either, but not both.")
 if (opts.options.has(opts.replicaAssignmentOpt)) {
   val assignment = 
parseReplicaAssignment(opts.options.valueOf(opts.replicaAssignmentOpt))
-  AdminUtils.createOrUpdateTopicPartitionAssignmentPathInZK(zkClient, 
topic, assignment, configs)
+  AdminUtils.createOrUpdateTopicPartitionAssignmentPathInZK(zkClient, 
topic, assignment, configs, update = false)
 } else {
   CommandLineUtils.checkRequiredArgs(opts.parser, opts.options, 
opts.partitionsOpt, opts.replicationFactorOpt)
   val partitions = opts.options.valueOf(opts.partitionsOpt).intValue

http://git-wip-us.apache.org/repos/asf/kafka/blob/1d2bd628/core/src/main/scala/kafka/common/Topic.scala
--
diff --git a/core/src/main/scala/kafka/common/Topic.scala 
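The collision rule warned about above is easy to see in a small sketch (Java for illustration; the patch's actual checks live in kafka.common.Topic in Scala): metric names map '.' to '_', so two topic names that differ only in those characters would produce the same metric name.

    // Illustrative Java sketch of the collision check described above.
    public final class TopicNameCollisions {
        public static boolean hasCollisionChars(String topic) {
            return topic.contains("_") || topic.contains(".");
        }

        public static boolean hasCollision(String topicA, String topicB) {
            // '.' and '_' both become '_' in metric names, so normalize and compare.
            return topicA.replace('.', '_').equals(topicB.replace('.', '_'));
        }

        public static void main(String[] args) {
            System.out.println(hasCollision("foo.bar", "foo_bar")); // prints true
        }
    }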

kafka git commit: KAFKA-2328; merge-kafka-pr.py script should not leave user in a detached branch

2015-07-20 Thread gwenshap
Repository: kafka
Updated Branches:
  refs/heads/trunk 1d2bd6284 -> 204089046


KAFKA-2328; merge-kafka-pr.py script should not leave user in a detached branch

The right command to get the branch name is `git rev-parse --abbrev-ref HEAD` 
instead of `git rev-parse HEAD`. The latter gives the commit hash causing a 
detached branch when we checkout to it. Seems like a bug we inherited from the 
Spark script.

Author: Ismael Juma ism...@juma.me.uk

Closes #84 from ijuma/kafka-2328-merge-script-no-detached-branch and squashes 
the following commits:

ae201dd [Ismael Juma] KAFKA-2328; merge-kafka-pr.py script should not leave 
user in a detached branch


Project: http://git-wip-us.apache.org/repos/asf/kafka/repo
Commit: http://git-wip-us.apache.org/repos/asf/kafka/commit/20408904
Tree: http://git-wip-us.apache.org/repos/asf/kafka/tree/20408904
Diff: http://git-wip-us.apache.org/repos/asf/kafka/diff/20408904

Branch: refs/heads/trunk
Commit: 2040890462a22614ccb836e43837f8739855b84f
Parents: 1d2bd62
Author: Ismael Juma ism...@juma.me.uk
Authored: Mon Jul 20 16:40:54 2015 -0700
Committer: Gwen Shapira csh...@gmail.com
Committed: Mon Jul 20 16:40:54 2015 -0700

--
 kafka-merge-pr.py | 9 ++---
 1 file changed, 6 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/kafka/blob/20408904/kafka-merge-pr.py
--
diff --git a/kafka-merge-pr.py b/kafka-merge-pr.py
index 4cc82e0..c6ef3df 100644
--- a/kafka-merge-pr.py
+++ b/kafka-merge-pr.py
@@ -95,8 +95,9 @@ def continue_maybe(prompt):
 fail("Okay, exiting")
 
 def clean_up():
-print "Restoring head pointer to %s" % original_head
-run_cmd("git checkout %s" % original_head)
+if original_head != get_current_branch():
+print "Restoring head pointer to %s" % original_head
+run_cmd("git checkout %s" % original_head)
 
 branches = run_cmd("git branch").replace(" ", "").split("\n")
 
@@ -104,6 +105,8 @@ def clean_up():
 print "Deleting local branch %s" % branch
 run_cmd("git branch -D %s" % branch)
 
+def get_current_branch():
+return run_cmd("git rev-parse --abbrev-ref HEAD").replace("\n", "")
 
 # merge the requested PR and return the merge hash
 def merge_pr(pr_num, target_ref, title, body, pr_repo_desc):
@@ -350,7 +353,7 @@ def standardize_jira_ref(text):
 def main():
 global original_head
 
-original_head = run_cmd("git rev-parse HEAD")[:8]
+original_head = get_current_branch()
 
 branches = get_json("%s/branches" % GITHUB_API_BASE)
 branch_names = filter(lambda x: x.startswith(RELEASE_BRANCH_PREFIX), 
[x['name'] for x in branches])



kafka git commit: MINOR: update to correct clock skew

2015-10-29 Thread gwenshap
Repository: kafka
Updated Branches:
  refs/heads/trunk 0da654761 -> e0ca8fdd7


MINOR: update to correct clock skew

ewencp
Updated the provisioning script to install ntp daemon.

Author: Geoff Anderson 

Reviewers: Gwen Shapira

Closes #383 from granders/minor-systest-clock-skew


Project: http://git-wip-us.apache.org/repos/asf/kafka/repo
Commit: http://git-wip-us.apache.org/repos/asf/kafka/commit/e0ca8fdd
Tree: http://git-wip-us.apache.org/repos/asf/kafka/tree/e0ca8fdd
Diff: http://git-wip-us.apache.org/repos/asf/kafka/diff/e0ca8fdd

Branch: refs/heads/trunk
Commit: e0ca8fdd7cf3fcc51a5a1152eacdf585434052d8
Parents: 0da6547
Author: Geoff Anderson 
Authored: Thu Oct 29 15:53:19 2015 -0700
Committer: Gwen Shapira 
Committed: Thu Oct 29 15:53:19 2015 -0700

--
 vagrant/base.sh | 5 +
 1 file changed, 5 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/kafka/blob/e0ca8fdd/vagrant/base.sh
--
diff --git a/vagrant/base.sh b/vagrant/base.sh
index 2c2e5c2..08b38ae 100644
--- a/vagrant/base.sh
+++ b/vagrant/base.sh
@@ -72,3 +72,8 @@ if [ ! -e /mnt ]; then
 mkdir /mnt
 fi
 chmod a+rwx /mnt
+
+# Run ntpdate once to sync to ntp servers
+ntpdate pool.ntp.org
+# Install ntp daemon - it will automatically start on boot
+apt-get -y install ntp



kafka git commit: KAFKA-2645: Document potentially breaking changes in the release note…

2015-10-27 Thread gwenshap
Repository: kafka
Updated Branches:
  refs/heads/trunk e6f9b9e47 -> fc4ef4791


KAFKA-2645: Document potentially breaking changes in the release note…

…s for 0.9.0

Author: Grant Henke 

Reviewers: Gwen Shapira, Guozhang Wang

Closes #337 from granthenke/docs


Project: http://git-wip-us.apache.org/repos/asf/kafka/repo
Commit: http://git-wip-us.apache.org/repos/asf/kafka/commit/fc4ef479
Tree: http://git-wip-us.apache.org/repos/asf/kafka/tree/fc4ef479
Diff: http://git-wip-us.apache.org/repos/asf/kafka/diff/fc4ef479

Branch: refs/heads/trunk
Commit: fc4ef479109fbae12470e44f8c12fe9e5f41e179
Parents: e6f9b9e
Author: Grant Henke 
Authored: Tue Oct 27 07:43:19 2015 -0700
Committer: Gwen Shapira 
Committed: Tue Oct 27 07:43:19 2015 -0700

--
 docs/configuration.html |  6 +++---
 docs/documentation.html | 10 +-
 docs/upgrade.html   | 28 +++-
 3 files changed, 31 insertions(+), 13 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/kafka/blob/fc4ef479/docs/configuration.html
--
diff --git a/docs/configuration.html b/docs/configuration.html
index c3cc13e..41cf995 100644
--- a/docs/configuration.html
+++ b/docs/configuration.html
@@ -5,9 +5,9 @@
  The ASF licenses this file to You under the Apache License, Version 2.0
  (the "License"); you may not use this file except in compliance with
  the License.  You may obtain a copy of the License at
- 
+
 http://www.apache.org/licenses/LICENSE-2.0
- 
+
  Unless required by applicable law or agreed to in writing, software
  distributed under the License is distributed on an "AS IS" BASIS,
  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
@@ -445,7 +445,7 @@ ZooKeeper also allows you to add a "chroot" path which will 
make all kafka data
 
 
   inter.broker.protocol.version
-  0.8.3
+  0.9.0
   Version of the protocol brokers will use to communicate with each 
other. This will default for the current version of the broker, but may need to 
be set to older versions during a rolling upgrade process. In that scenario, 
upgraded brokers will use the older version of the protocol and therefore will 
be able to communicate with brokers that were not yet upgraded. See upgrade section for more details.
 
 

http://git-wip-us.apache.org/repos/asf/kafka/blob/fc4ef479/docs/documentation.html
--
diff --git a/docs/documentation.html b/docs/documentation.html
index 8f9b081..860f276 100644
--- a/docs/documentation.html
+++ b/docs/documentation.html
@@ -5,9 +5,9 @@
  The ASF licenses this file to You under the Apache License, Version 2.0
  (the "License"); you may not use this file except in compliance with
  the License.  You may obtain a copy of the License at
- 
+
 http://www.apache.org/licenses/LICENSE-2.0
- 
+
  Unless required by applicable law or agreed to in writing, software
  distributed under the License is distributed on an "AS IS" BASIS,
  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
@@ -17,10 +17,10 @@
 
 
 
-Kafka 0.8.2 Documentation
-Prior releases: 0.7.x, 0.8.0, 0.8.1.X.
+Kafka 0.9.0 Documentation
+Prior releases: 0.7.x, 0.8.0, 0.8.1.X, 0.8.2.X.
 
-
+
 
 1. Getting Started
  

http://git-wip-us.apache.org/repos/asf/kafka/blob/fc4ef479/docs/upgrade.html
--
diff --git a/docs/upgrade.html b/docs/upgrade.html
index 4b7033a..69bcdc1 100644
--- a/docs/upgrade.html
+++ b/docs/upgrade.html
@@ -5,9 +5,9 @@
  The ASF licenses this file to You under the Apache License, Version 2.0
  (the "License"); you may not use this file except in compliance with
  the License.  You may obtain a copy of the License at
- 
+
 http://www.apache.org/licenses/LICENSE-2.0
- 
+
  Unless required by applicable law or agreed to in writing, software
  distributed under the License is distributed on an "AS IS" BASIS,
  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
@@ -17,13 +17,13 @@
 
 1.5 Upgrading From Previous Versions
 
-Upgrading from 0.8.0, 0.8.1.X or 0.8.2.X to 0.8.3.0
+Upgrading from 0.8.0, 0.8.1.X or 0.8.2.X to 0.9.0.0
 
-0.8.3.0 has an inter-broker protocol change from previous versions. For a 
rolling upgrade:
+0.9.0.0 has an inter-broker protocol change from previous versions. For a 
rolling upgrade:
 
 Update server.properties file on all brokers and add the following 
property: inter.broker.protocol.version=0.8.2.X 
 Upgrade the brokers. This can be done a broker at a time by simply 
bringing it down, updating the code, and restarting it. 
-Once the entire cluster is upgraded, bump the protocol 

kafka git commit: KAFKA-2452: Add new consumer option to mirror maker.

2015-10-27 Thread gwenshap
Repository: kafka
Updated Branches:
  refs/heads/trunk 2e4aed707 -> 2fd645ac2


KAFKA-2452: Add new consumer option to mirror maker.

Author: Jiangjie Qin 

Reviewers: Ben Stopford, Guozhang Wang

Closes #266 from becketqin/KAFKA-2452


Project: http://git-wip-us.apache.org/repos/asf/kafka/repo
Commit: http://git-wip-us.apache.org/repos/asf/kafka/commit/2fd645ac
Tree: http://git-wip-us.apache.org/repos/asf/kafka/tree/2fd645ac
Diff: http://git-wip-us.apache.org/repos/asf/kafka/diff/2fd645ac

Branch: refs/heads/trunk
Commit: 2fd645ac2fec7cf089cb8175ee47823b67a07226
Parents: 2e4aed7
Author: Jiangjie Qin 
Authored: Tue Oct 27 07:59:52 2015 -0700
Committer: Gwen Shapira 
Committed: Tue Oct 27 07:59:52 2015 -0700

--
 .../scala/kafka/consumer/BaseConsumer.scala |  12 +-
 .../main/scala/kafka/tools/MirrorMaker.scala| 554 ---
 2 files changed, 373 insertions(+), 193 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/kafka/blob/2fd645ac/core/src/main/scala/kafka/consumer/BaseConsumer.scala
--
diff --git a/core/src/main/scala/kafka/consumer/BaseConsumer.scala 
b/core/src/main/scala/kafka/consumer/BaseConsumer.scala
index 8b93493..52cd5fa 100644
--- a/core/src/main/scala/kafka/consumer/BaseConsumer.scala
+++ b/core/src/main/scala/kafka/consumer/BaseConsumer.scala
@@ -28,13 +28,15 @@ trait BaseConsumer {
   def receive(): BaseConsumerRecord
   def stop()
   def cleanup()
+  def commit()
 }
 
 case class BaseConsumerRecord(topic: String, partition: Int, offset: Long, 
key: Array[Byte], value: Array[Byte])
 
 class NewShinyConsumer(topic: String, consumerProps: Properties, val 
timeoutMs: Long = Long.MaxValue) extends BaseConsumer {
   import org.apache.kafka.clients.consumer.KafkaConsumer
-  import scala.collection.JavaConversions._
+
+import scala.collection.JavaConversions._
 
   val consumer = new KafkaConsumer[Array[Byte], Array[Byte]](consumerProps)
   consumer.subscribe(List(topic))
@@ -58,6 +60,10 @@ class NewShinyConsumer(topic: String, consumerProps: 
Properties, val timeoutMs:
   override def cleanup() {
 this.consumer.close()
   }
+
+  override def commit() {
+this.consumer.commitSync()
+  }
 }
 
 class OldConsumer(topicFilter: TopicFilter, consumerProps: Properties) extends 
BaseConsumer {
@@ -81,5 +87,9 @@ class OldConsumer(topicFilter: TopicFilter, consumerProps: 
Properties) extends B
   override def cleanup() {
 this.consumerConnector.shutdown()
   }
+
+  override def commit() {
+this.consumerConnector.commitOffsets
+  }
 }
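A sketch of the consume/produce/commit cycle the new commit() hook enables (generic names, not the patch's Scala code): by flushing the producer before calling commit(), offsets are only committed once the mirrored records are durable, which gives at-least-once delivery.

    // Generic sketch; the interfaces are stand-ins for the consumer and
    // producer wrappers, not types from the patch.
    interface MirrorConsumer {
        byte[] receive();  // next record value, blocking
        void commit();     // commit the offsets of everything received so far
    }

    interface MirrorProducer {
        void send(byte[] value);
        void flush();
    }

    class MirrorLoop {
        static void run(MirrorConsumer consumer, MirrorProducer producer, int commitInterval) {
            int sinceCommit = 0;
            while (true) {
                producer.send(consumer.receive());
                if (++sinceCommit >= commitInterval) {
                    producer.flush();   // make the mirrored records durable first
                    consumer.commit();  // only then mark them consumed
                    sinceCommit = 0;
                }
            }
        }
    }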
 

http://git-wip-us.apache.org/repos/asf/kafka/blob/2fd645ac/core/src/main/scala/kafka/tools/MirrorMaker.scala
--
diff --git a/core/src/main/scala/kafka/tools/MirrorMaker.scala 
b/core/src/main/scala/kafka/tools/MirrorMaker.scala
index fbe0c83..3cf754b 100755
--- a/core/src/main/scala/kafka/tools/MirrorMaker.scala
+++ b/core/src/main/scala/kafka/tools/MirrorMaker.scala
@@ -20,22 +20,27 @@ package kafka.tools
 import java.util
 import java.util.concurrent.atomic.{AtomicBoolean, AtomicInteger}
 import java.util.concurrent.{CountDownLatch, TimeUnit}
+import java.util.regex.Pattern
 import java.util.{Collections, Properties}
 
 import com.yammer.metrics.core.Gauge
 import joptsimple.OptionParser
-import kafka.consumer.{Blacklist, ConsumerConfig, ConsumerThreadId, 
ConsumerTimeoutException, TopicFilter, Whitelist, ZookeeperConsumerConnector}
+import kafka.client.ClientUtils
+import kafka.consumer.{BaseConsumerRecord, ConsumerIterator, BaseConsumer, 
Blacklist, ConsumerConfig, ConsumerThreadId, ConsumerTimeoutException, 
TopicFilter, Whitelist, ZookeeperConsumerConnector}
 import kafka.javaapi.consumer.ConsumerRebalanceListener
 import kafka.message.MessageAndMetadata
 import kafka.metrics.KafkaMetricsGroup
 import kafka.serializer.DefaultDecoder
 import kafka.utils.{CommandLineUtils, CoreUtils, Logging}
+import org.apache.kafka.clients.consumer.{ConsumerWakeupException, Consumer, 
ConsumerRecord, KafkaConsumer}
 import org.apache.kafka.clients.producer.internals.ErrorLoggingCallback
 import org.apache.kafka.clients.producer.{KafkaProducer, ProducerConfig, 
ProducerRecord, RecordMetadata}
+import org.apache.kafka.common.TopicPartition
+import org.apache.kafka.common.serialization.ByteArrayDeserializer
 import org.apache.kafka.common.utils.Utils
 
 import scala.collection.JavaConversions._
-
+import scala.util.control.ControlThrowable
 
 /**
  * The mirror maker has the following architecture:
@@ -56,12 +61,11 @@ import scala.collection.JavaConversions._
  */
 object MirrorMaker extends Logging with KafkaMetricsGroup {
 
-  private var connectors: Seq[ZookeeperConsumerConnector] = null
   private var producer: 

kafka git commit: KAFKA-2516: Rename o.a.k.client.tools to o.a.k.tools

2015-10-27 Thread gwenshap
Repository: kafka
Updated Branches:
  refs/heads/trunk fc4ef4791 -> 2e4aed707


KAFKA-2516: Rename o.a.k.client.tools to o.a.k.tools

Author: Grant Henke 

Reviewers: Gwen Shapira, Ewen Cheslack-Postava

Closes #310 from granthenke/tools-packaging


Project: http://git-wip-us.apache.org/repos/asf/kafka/repo
Commit: http://git-wip-us.apache.org/repos/asf/kafka/commit/2e4aed70
Tree: http://git-wip-us.apache.org/repos/asf/kafka/tree/2e4aed70
Diff: http://git-wip-us.apache.org/repos/asf/kafka/diff/2e4aed70

Branch: refs/heads/trunk
Commit: 2e4aed7070f0283e2c1e0e563fdb3324482463a5
Parents: fc4ef47
Author: Grant Henke 
Authored: Tue Oct 27 07:44:32 2015 -0700
Committer: Gwen Shapira 
Committed: Tue Oct 27 07:44:32 2015 -0700

--
 bin/kafka-verifiable-producer.sh|   2 +-
 checkstyle/import-control.xml   |  14 +-
 .../kafkatest/services/kafka_log4j_appender.py  |   2 +-
 .../performance/producer_performance.py |   2 +-
 .../clients/tools/ProducerPerformance.java  | 201 
 .../clients/tools/ThroughputThrottler.java  | 118 ---
 .../clients/tools/VerifiableLog4jAppender.java  | 162 --
 .../kafka/clients/tools/VerifiableProducer.java | 324 ---
 .../apache/kafka/tools/ProducerPerformance.java | 201 
 .../apache/kafka/tools/ThroughputThrottler.java | 117 +++
 .../kafka/tools/VerifiableLog4jAppender.java| 162 ++
 .../apache/kafka/tools/VerifiableProducer.java  | 324 +++
 12 files changed, 814 insertions(+), 815 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/kafka/blob/2e4aed70/bin/kafka-verifiable-producer.sh
--
diff --git a/bin/kafka-verifiable-producer.sh b/bin/kafka-verifiable-producer.sh
index d0aa6c5..98fe557 100755
--- a/bin/kafka-verifiable-producer.sh
+++ b/bin/kafka-verifiable-producer.sh
@@ -17,4 +17,4 @@
 if [ "x$KAFKA_HEAP_OPTS" = "x" ]; then
 export KAFKA_HEAP_OPTS="-Xmx512M"
 fi
-exec $(dirname $0)/kafka-run-class.sh 
org.apache.kafka.clients.tools.VerifiableProducer $@
+exec $(dirname $0)/kafka-run-class.sh 
org.apache.kafka.tools.VerifiableProducer $@

http://git-wip-us.apache.org/repos/asf/kafka/blob/2e4aed70/checkstyle/import-control.xml
--
diff --git a/checkstyle/import-control.xml b/checkstyle/import-control.xml
index e1ea93c..187bee8 100644
--- a/checkstyle/import-control.xml
+++ b/checkstyle/import-control.xml
@@ -105,14 +105,14 @@
[The XML elements in this hunk were not preserved by the archive's renderer; the hunk presumably rewrites the checkstyle import rules for the tools classes relocated from org.apache.kafka.clients.tools to org.apache.kafka.tools.]

http://git-wip-us.apache.org/repos/asf/kafka/blob/2e4aed70/tests/kafkatest/services/kafka_log4j_appender.py
--
diff --git a/tests/kafkatest/services/kafka_log4j_appender.py 
b/tests/kafkatest/services/kafka_log4j_appender.py
index 11369aa..ff6bb18 100644
--- a/tests/kafkatest/services/kafka_log4j_appender.py
+++ b/tests/kafkatest/services/kafka_log4j_appender.py
@@ -38,7 +38,7 @@ class KafkaLog4jAppender(BackgroundThreadService):
 
 @property
 def start_cmd(self):
-cmd = "/opt/kafka/bin/kafka-run-class.sh 
org.apache.kafka.clients.tools.VerifiableLog4jAppender" \
+cmd = "/opt/kafka/bin/kafka-run-class.sh 
org.apache.kafka.tools.VerifiableLog4jAppender" \
   " --topic %s --broker-list %s" % (self.topic, 
self.kafka.bootstrap_servers())
 if self.max_messages > 0:
 cmd += " --max-messages %s" % str(self.max_messages)

http://git-wip-us.apache.org/repos/asf/kafka/blob/2e4aed70/tests/kafkatest/services/performance/producer_performance.py
--
diff --git a/tests/kafkatest/services/performance/producer_performance.py 
b/tests/kafkatest/services/performance/producer_performance.py
index f842026..25911af 100644
--- a/tests/kafkatest/services/performance/producer_performance.py
+++ b/tests/kafkatest/services/performance/producer_performance.py
@@ -46,7 +46,7 @@ class ProducerPerformanceService(JmxMixin, 
PerformanceService):
 def _worker(self, idx, node):
 args = self.args.copy()
 args.update({'bootstrap_servers': self.kafka.bootstrap_servers(), 
'jmx_port': self.jmx_port, 'client_id': self.client_id})
-cmd = "JMX_PORT=%(jmx_port)d /opt/kafka/bin/kafka-run-class.sh 
org.apache.kafka.clients.tools.ProducerPerformance " \
+cmd = "JMX_PORT=%(jmx_port)d /opt/kafka/bin/kafka-run-class.sh 
org.apache.kafka.tools.ProducerPerformance " \
   "%(topic)s %(num_records)d %(record_size)d %(throughput)d 
bootstrap.servers=%(bootstrap_servers)s client.id=%(client_id)s" % 

kafka git commit: KAFKA-2447: Add capability to KafkaLog4jAppender to be able to use SSL

2015-10-27 Thread gwenshap
Repository: kafka
Updated Branches:
  refs/heads/trunk 2fd645ac2 -> d21cb66e7


KAFKA-2447: Add capability to KafkaLog4jAppender to be able to use SSL

Author: Ashish Singh 

Reviewers: Gwen Shapira, Ismael Juma

Closes #175 from SinghAsDev/KAFKA-2447


Project: http://git-wip-us.apache.org/repos/asf/kafka/repo
Commit: http://git-wip-us.apache.org/repos/asf/kafka/commit/d21cb66e
Tree: http://git-wip-us.apache.org/repos/asf/kafka/tree/d21cb66e
Diff: http://git-wip-us.apache.org/repos/asf/kafka/diff/d21cb66e

Branch: refs/heads/trunk
Commit: d21cb66e7d21ed3d20fc1e13b9a856f764bb4237
Parents: 2fd645a
Author: Ashish Singh 
Authored: Tue Oct 27 08:45:27 2015 -0700
Committer: Gwen Shapira 
Committed: Tue Oct 27 08:45:27 2015 -0700

--
 .../kafka/log4jappender/KafkaLog4jAppender.java | 88 ++--
 1 file changed, 82 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/kafka/blob/d21cb66e/log4j-appender/src/main/java/org/apache/kafka/log4jappender/KafkaLog4jAppender.java
--
diff --git 
a/log4j-appender/src/main/java/org/apache/kafka/log4jappender/KafkaLog4jAppender.java
 
b/log4j-appender/src/main/java/org/apache/kafka/log4jappender/KafkaLog4jAppender.java
index 2baef06..94120e2 100644
--- 
a/log4j-appender/src/main/java/org/apache/kafka/log4jappender/KafkaLog4jAppender.java
+++ 
b/log4j-appender/src/main/java/org/apache/kafka/log4jappender/KafkaLog4jAppender.java
@@ -17,11 +17,14 @@
 
 package org.apache.kafka.log4jappender;
 
+import org.apache.kafka.clients.CommonClientConfigs;
 import org.apache.kafka.clients.producer.KafkaProducer;
 import org.apache.kafka.clients.producer.Producer;
+import org.apache.kafka.clients.producer.ProducerConfig;
 import org.apache.kafka.clients.producer.ProducerRecord;
 import org.apache.kafka.clients.producer.RecordMetadata;
 import org.apache.kafka.common.config.ConfigException;
+import org.apache.kafka.common.config.SSLConfigs;
 import org.apache.log4j.AppenderSkeleton;
 import org.apache.log4j.helpers.LogLog;
 import org.apache.log4j.spi.LoggingEvent;
@@ -36,16 +39,28 @@ import java.util.concurrent.Future;
  */
 public class KafkaLog4jAppender extends AppenderSkeleton {
 
-private static final String BOOTSTRAP_SERVERS_CONFIG = "bootstrap.servers";
-private static final String COMPRESSION_TYPE_CONFIG = "compression.type";
-private static final String ACKS_CONFIG = "acks";
-private static final String RETRIES_CONFIG = "retries";
-private static final String KEY_SERIALIZER_CLASS_CONFIG = "key.serializer";
-private static final String VALUE_SERIALIZER_CLASS_CONFIG = 
"value.serializer";
+private static final String BOOTSTRAP_SERVERS_CONFIG = 
ProducerConfig.BOOTSTRAP_SERVERS_CONFIG;
+private static final String COMPRESSION_TYPE_CONFIG = 
ProducerConfig.COMPRESSION_TYPE_CONFIG;
+private static final String ACKS_CONFIG = ProducerConfig.ACKS_CONFIG;
+private static final String RETRIES_CONFIG = ProducerConfig.RETRIES_CONFIG;
+private static final String KEY_SERIALIZER_CLASS_CONFIG = 
ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG;
+private static final String VALUE_SERIALIZER_CLASS_CONFIG = 
ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG;
+private static final String SECURITY_PROTOCOL = 
CommonClientConfigs.SECURITY_PROTOCOL_CONFIG;
+private static final String SSL_TRUSTSTORE_LOCATION = 
SSLConfigs.SSL_TRUSTSTORE_LOCATION_CONFIG;
+private static final String SSL_TRUSTSTORE_PASSWORD = 
SSLConfigs.SSL_TRUSTSTORE_PASSWORD_CONFIG;
+private static final String SSL_KEYSTORE_TYPE = 
SSLConfigs.SSL_KEYSTORE_TYPE_CONFIG;
+private static final String SSL_KEYSTORE_LOCATION = 
SSLConfigs.SSL_KEYSTORE_LOCATION_CONFIG;
+private static final String SSL_KEYSTORE_PASSWORD = 
SSLConfigs.SSL_KEYSTORE_PASSWORD_CONFIG;
 
 private String brokerList = null;
 private String topic = null;
 private String compressionType = null;
+private String securityProtocol = null;
+private String sslTruststoreLocation = null;
+private String sslTruststorePassword = null;
+private String sslKeystoreType = null;
+private String sslKeystoreLocation = null;
+private String sslKeystorePassword = null;
 
 private int retries = 0;
 private int requiredNumAcks = Integer.MAX_VALUE;
@@ -104,6 +119,54 @@ public class KafkaLog4jAppender extends AppenderSkeleton {
 this.syncSend = syncSend;
 }
 
+public String getSslTruststorePassword() {
+return sslTruststorePassword;
+}
+
+public String getSslTruststoreLocation() {
+return sslTruststoreLocation;
+}
+
+public String getSecurityProtocol() {
+return securityProtocol;
+}
+
+public void setSecurityProtocol(String 
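For context, a minimal sketch (not part of this commit) of wiring the appender up with the new SSL options from Java rather than a log4j.properties file. It assumes the standard log4j bean setters implied by the fields above (setBrokerList, setTopic, setSslTruststoreLocation, and so on) plus an SSL listener and truststore on your side:

    import org.apache.kafka.log4jappender.KafkaLog4jAppender;
    import org.apache.log4j.Logger;
    import org.apache.log4j.PatternLayout;

    public class SslAppenderSketch {
        public static void main(String[] args) {
            KafkaLog4jAppender appender = new KafkaLog4jAppender();
            appender.setLayout(new PatternLayout("%m"));
            appender.setBrokerList("broker1:9093");   // broker's SSL listener
            appender.setTopic("app-logs");
            appender.setSecurityProtocol("SSL");
            appender.setSslTruststoreLocation("/etc/kafka/client.truststore.jks");
            appender.setSslTruststorePassword("changeit");
            appender.activateOptions();               // builds the underlying producer
            Logger.getRootLogger().addAppender(appender);
            Logger.getRootLogger().info("hello over SSL");
        }
    }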

kafka git commit: MINOR: KAFKA-2371 follow-up, DistributedHerder should wakeup WorkerGroupMember after assignment to ensure work is started immediately

2015-10-28 Thread gwenshap
Repository: kafka
Updated Branches:
  refs/heads/trunk 9855bb9c6 -> 8838fa801


MINOR: KAFKA-2371 follow-up, DistributedHerder should wakeup WorkerGroupMember 
after assignment to ensure work is started immediately

Author: Ewen Cheslack-Postava 

Reviewers: Gwen Shapira

Closes #360 from ewencp/minor-kafka-2371-follow-up-wakeup-after-rebalance


Project: http://git-wip-us.apache.org/repos/asf/kafka/repo
Commit: http://git-wip-us.apache.org/repos/asf/kafka/commit/8838fa80
Tree: http://git-wip-us.apache.org/repos/asf/kafka/tree/8838fa80
Diff: http://git-wip-us.apache.org/repos/asf/kafka/diff/8838fa80

Branch: refs/heads/trunk
Commit: 8838fa8010c146af6aab014a41bc7e68318b4eb0
Parents: 9855bb9
Author: Ewen Cheslack-Postava 
Authored: Wed Oct 28 12:42:03 2015 -0700
Committer: Gwen Shapira 
Committed: Wed Oct 28 12:42:03 2015 -0700

--
 .../kafka/copycat/runtime/distributed/DistributedHerder.java | 4 
 .../kafka/copycat/runtime/distributed/DistributedHerderTest.java | 2 ++
 2 files changed, 6 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/kafka/blob/8838fa80/copycat/runtime/src/main/java/org/apache/kafka/copycat/runtime/distributed/DistributedHerder.java
--
diff --git 
a/copycat/runtime/src/main/java/org/apache/kafka/copycat/runtime/distributed/DistributedHerder.java
 
b/copycat/runtime/src/main/java/org/apache/kafka/copycat/runtime/distributed/DistributedHerder.java
index 17bf7b7..46c7686 100644
--- 
a/copycat/runtime/src/main/java/org/apache/kafka/copycat/runtime/distributed/DistributedHerder.java
+++ 
b/copycat/runtime/src/main/java/org/apache/kafka/copycat/runtime/distributed/DistributedHerder.java
@@ -614,6 +614,10 @@ public class DistributedHerder implements Herder, Runnable 
{
 log.info("Joined group and got assignment: {}", assignment);
 DistributedHerder.this.assignment = assignment;
 rebalanceResolved = false;
+// We *must* interrupt any poll() call since this could occur 
when the poll starts, and we might then
+// sleep in the poll() for a long time. Forcing a wakeup 
ensures we'll get to process this event in the
+// main thread.
+member.wakeup();
 }
 
 @Override
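A generic sketch of the pattern this comment describes (hypothetical names, not Copycat code): the callback thread flags the new assignment and unparks the poller, standing in for member.wakeup(), so the main loop services the event immediately instead of sleeping out a long poll timeout:

    import java.util.concurrent.atomic.AtomicBoolean;
    import java.util.concurrent.locks.LockSupport;

    public class WakeupSketch {
        private final AtomicBoolean assignmentPending = new AtomicBoolean(false);
        private volatile Thread poller;

        public void onAssigned() {                // runs on the callback thread
            assignmentPending.set(true);
            LockSupport.unpark(poller);           // stands in for member.wakeup()
        }

        public void runLoop() {                   // runs on the main thread
            poller = Thread.currentThread();
            while (!Thread.currentThread().isInterrupted()) {
                if (assignmentPending.getAndSet(false))
                    System.out.println("starting newly assigned work");
                LockSupport.parkNanos(60_000_000_000L);  // stands in for a long poll()
            }
        }
    }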

http://git-wip-us.apache.org/repos/asf/kafka/blob/8838fa80/copycat/runtime/src/test/java/org/apache/kafka/copycat/runtime/distributed/DistributedHerderTest.java
--
diff --git 
a/copycat/runtime/src/test/java/org/apache/kafka/copycat/runtime/distributed/DistributedHerderTest.java
 
b/copycat/runtime/src/test/java/org/apache/kafka/copycat/runtime/distributed/DistributedHerderTest.java
index 1213656..c8b4874 100644
--- 
a/copycat/runtime/src/test/java/org/apache/kafka/copycat/runtime/distributed/DistributedHerderTest.java
+++ 
b/copycat/runtime/src/test/java/org/apache/kafka/copycat/runtime/distributed/DistributedHerderTest.java
@@ -371,6 +371,8 @@ public class DistributedHerderTest {
 return null;
 }
 });
+member.wakeup();
+PowerMock.expectLastCall();
 }
 
 private void expectPostRebalanceCatchup(final ClusterConfigState 
readToEndSnapshot) {



kafka git commit: KAFKA-2671: Enable starting Kafka server with a Properties object

2015-10-22 Thread gwenshap
Repository: kafka
Updated Branches:
  refs/heads/trunk aa56dfb9e -> 701c46b3a


KAFKA-2671: Enable starting Kafka server with a Properties object

Author: Ashish Singh 

Reviewers: Eno Thereska, Gwen Shapira

Closes #330 from SinghAsDev/KAFKA-2671


Project: http://git-wip-us.apache.org/repos/asf/kafka/repo
Commit: http://git-wip-us.apache.org/repos/asf/kafka/commit/701c46b3
Tree: http://git-wip-us.apache.org/repos/asf/kafka/tree/701c46b3
Diff: http://git-wip-us.apache.org/repos/asf/kafka/diff/701c46b3

Branch: refs/heads/trunk
Commit: 701c46b3a32da78008ae48298778fca8b5a16bce
Parents: aa56dfb
Author: Ashish Singh 
Authored: Thu Oct 22 22:27:46 2015 -0700
Committer: Gwen Shapira 
Committed: Thu Oct 22 22:27:46 2015 -0700

--
 core/src/main/scala/kafka/Kafka.scala   | 12 +---
 .../main/scala/kafka/server/KafkaServerStartable.scala  | 11 ++-
 2 files changed, 15 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/kafka/blob/701c46b3/core/src/main/scala/kafka/Kafka.scala
--
diff --git a/core/src/main/scala/kafka/Kafka.scala 
b/core/src/main/scala/kafka/Kafka.scala
index 6af7b80..6b551ce 100755
--- a/core/src/main/scala/kafka/Kafka.scala
+++ b/core/src/main/scala/kafka/Kafka.scala
@@ -19,13 +19,13 @@ package kafka
 
 import java.util.Properties
 
-import scala.collection.JavaConversions._
 import joptsimple.OptionParser
-import metrics.KafkaMetricsReporter
-import server.{KafkaConfig, KafkaServerStartable, KafkaServer}
-import kafka.utils.{VerifiableProperties, CommandLineUtils, Logging}
+import kafka.server.{KafkaServer, KafkaServerStartable}
+import kafka.utils.{CommandLineUtils, Logging}
 import org.apache.kafka.common.utils.Utils
 
+import scala.collection.JavaConversions._
+
 object Kafka extends Logging {
 
   def getPropsFromArgs(args: Array[String]): Properties = {
@@ -55,9 +55,7 @@ object Kafka extends Logging {
   def main(args: Array[String]): Unit = {
 try {
   val serverProps = getPropsFromArgs(args)
-  val serverConfig = KafkaConfig.fromProps(serverProps)
-  KafkaMetricsReporter.startReporters(new 
VerifiableProperties(serverProps))
-  val kafkaServerStartable = new KafkaServerStartable(serverConfig)
+  val kafkaServerStartable = KafkaServerStartable.fromProps(serverProps)
 
   // attach shutdown handler to catch control-c
   Runtime.getRuntime().addShutdownHook(new Thread() {

http://git-wip-us.apache.org/repos/asf/kafka/blob/701c46b3/core/src/main/scala/kafka/server/KafkaServerStartable.scala
--
diff --git a/core/src/main/scala/kafka/server/KafkaServerStartable.scala 
b/core/src/main/scala/kafka/server/KafkaServerStartable.scala
index df521b3..fc98912 100644
--- a/core/src/main/scala/kafka/server/KafkaServerStartable.scala
+++ b/core/src/main/scala/kafka/server/KafkaServerStartable.scala
@@ -17,8 +17,17 @@
 
 package kafka.server
 
-import kafka.utils.Logging
+import java.util.Properties
 
+import kafka.metrics.KafkaMetricsReporter
+import kafka.utils.{VerifiableProperties, Logging}
+
+object KafkaServerStartable {
+  def fromProps(serverProps: Properties) = {
+KafkaMetricsReporter.startReporters(new VerifiableProperties(serverProps))
+new KafkaServerStartable(KafkaConfig.fromProps(serverProps))
+  }
+}
 
 class KafkaServerStartable(val serverConfig: KafkaConfig) extends Logging {
   private val server = new KafkaServer(serverConfig)
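A minimal embedding sketch using the new factory method, assuming a Java caller (the Scala companion method is reachable through the usual static forwarder) and illustrative single-broker settings:

    import java.util.Properties;
    import kafka.server.KafkaServerStartable;

    public class EmbeddedKafkaSketch {
        public static void main(String[] args) {
            Properties props = new Properties();
            props.put("broker.id", "0");
            props.put("zookeeper.connect", "localhost:2181");
            props.put("log.dirs", "/tmp/kafka-logs");

            // fromProps also starts the metrics reporters before building the server
            final KafkaServerStartable server = KafkaServerStartable.fromProps(props);
            server.startup();
            Runtime.getRuntime().addShutdownHook(new Thread() {
                @Override
                public void run() {
                    server.shutdown();
                }
            });
            server.awaitShutdown();
        }
    }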



kafka git commit: KAFKA-2626: Handle null keys and value validation properly in OffsetStorageWriter.

2015-10-23 Thread gwenshap
Repository: kafka
Updated Branches:
  refs/heads/trunk 2e6177359 -> 6f2f1f984


KAFKA-2626: Handle null keys and value validation properly in 
OffsetStorageWriter.

Author: Ewen Cheslack-Postava 

Reviewers: Gwen Shapira

Closes #345 from ewencp/kafka-2626-offset-storage-writer-null-values


Project: http://git-wip-us.apache.org/repos/asf/kafka/repo
Commit: http://git-wip-us.apache.org/repos/asf/kafka/commit/6f2f1f98
Tree: http://git-wip-us.apache.org/repos/asf/kafka/tree/6f2f1f98
Diff: http://git-wip-us.apache.org/repos/asf/kafka/diff/6f2f1f98

Branch: refs/heads/trunk
Commit: 6f2f1f9843f537b9bda3aa3951a867fdee661761
Parents: 2e61773
Author: Ewen Cheslack-Postava 
Authored: Fri Oct 23 17:01:33 2015 -0700
Committer: Gwen Shapira 
Committed: Fri Oct 23 17:01:33 2015 -0700

--
 .../kafka/copycat/storage/OffsetUtils.java  |  5 ++
 .../storage/OffsetStorageWriterTest.java| 71 +++-
 2 files changed, 59 insertions(+), 17 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/kafka/blob/6f2f1f98/copycat/runtime/src/main/java/org/apache/kafka/copycat/storage/OffsetUtils.java
--
diff --git 
a/copycat/runtime/src/main/java/org/apache/kafka/copycat/storage/OffsetUtils.java
 
b/copycat/runtime/src/main/java/org/apache/kafka/copycat/storage/OffsetUtils.java
index 8d78a57..9ba7662 100644
--- 
a/copycat/runtime/src/main/java/org/apache/kafka/copycat/storage/OffsetUtils.java
+++ 
b/copycat/runtime/src/main/java/org/apache/kafka/copycat/storage/OffsetUtils.java
@@ -34,6 +34,11 @@ public class OffsetUtils {
 }
 
 public static <K, V> void validateFormat(Map<K, V> offsetData) {
+// Both keys and values for offsets may be null. For values, this is a 
useful way to delete offsets or indicate
+// that there's no usable concept of offsets in your source system.
+if (offsetData == null)
+return;
+
 for (Map.Entry<K, V> entry : offsetData.entrySet()) {
 if (!(entry.getKey() instanceof String))
 throw new DataException("Offsets may only use String keys");
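Illustrative calls against the method above (a sketch, not from the commit); only the null check and the String-key check shown here are exercised:

    import java.util.Collections;
    import org.apache.kafka.copycat.storage.OffsetUtils;

    public class OffsetValidationSketch {
        public static void main(String[] args) {
            OffsetUtils.validateFormat(null);                                       // OK with this fix: null means "no offsets"
            OffsetUtils.validateFormat(Collections.singletonMap("position", 42L));  // OK: String key
            OffsetUtils.validateFormat(Collections.singletonMap(7, "x"));           // throws DataException: non-String key
        }
    }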

http://git-wip-us.apache.org/repos/asf/kafka/blob/6f2f1f98/copycat/runtime/src/test/java/org/apache/kafka/copycat/storage/OffsetStorageWriterTest.java
--
diff --git 
a/copycat/runtime/src/test/java/org/apache/kafka/copycat/storage/OffsetStorageWriterTest.java
 
b/copycat/runtime/src/test/java/org/apache/kafka/copycat/storage/OffsetStorageWriterTest.java
index e33ecd0..3dd0b52 100644
--- 
a/copycat/runtime/src/test/java/org/apache/kafka/copycat/storage/OffsetStorageWriterTest.java
+++ 
b/copycat/runtime/src/test/java/org/apache/kafka/copycat/storage/OffsetStorageWriterTest.java
@@ -45,15 +45,11 @@ public class OffsetStorageWriterTest {
 private static final String NAMESPACE = "namespace";
 // Copycat format - any types should be accepted here
private static final Map<String, String> OFFSET_KEY = 
Collections.singletonMap("key", "key");
-private static final List<Object> OFFSET_KEY_WRAPPED = 
Arrays.asList(NAMESPACE, OFFSET_KEY);
private static final Map<String, Integer> OFFSET_VALUE = 
Collections.singletonMap("key", 12);
 
 // Serialized
 private static final byte[] OFFSET_KEY_SERIALIZED = 
"key-serialized".getBytes();
 private static final byte[] OFFSET_VALUE_SERIALIZED = 
"value-serialized".getBytes();
-private static final Map<ByteBuffer, ByteBuffer> OFFSETS_SERIALIZED
-= Collections.singletonMap(ByteBuffer.wrap(OFFSET_KEY_SERIALIZED),
-ByteBuffer.wrap(OFFSET_VALUE_SERIALIZED));
 
 @Mock private OffsetBackingStore store;
 @Mock private Converter keyConverter;
@@ -79,7 +75,7 @@ public class OffsetStorageWriterTest {
 public void testWriteFlush() throws Exception {
 @SuppressWarnings("unchecked")
Callback<Void> callback = PowerMock.createMock(Callback.class);
-expectStore(callback, false);
+expectStore(OFFSET_KEY, OFFSET_KEY_SERIALIZED, OFFSET_VALUE, 
OFFSET_VALUE_SERIALIZED, callback, false, null);
 
 PowerMock.replayAll();
 
@@ -91,6 +87,41 @@ public class OffsetStorageWriterTest {
 PowerMock.verifyAll();
 }
 
+// It should be possible to set offset values to null
+@Test
+public void testWriteNullValueFlush() throws Exception {
+@SuppressWarnings("unchecked")
+Callback<Void> callback = PowerMock.createMock(Callback.class);
+expectStore(OFFSET_KEY, OFFSET_KEY_SERIALIZED, null, null, callback, 
false, null);
+
+PowerMock.replayAll();
+
+writer.offset(OFFSET_KEY, null);
+
+assertTrue(writer.beginFlush());
+writer.doFlush(callback).get(1000, TimeUnit.MILLISECONDS);
+
+

[3/3] kafka git commit: KAFKA-2371: Add distributed support for Copycat.

2015-10-23 Thread gwenshap
KAFKA-2371: Add distributed support for Copycat.

This adds coordination between DistributedHerders using the generalized consumer
support, allowing automatic balancing of connectors and tasks across workers. A
few pieces that require interaction between workers (resolving config
inconsistencies, forwarding of configuration changes to the leader worker) are
incomplete because they require REST API support to implement properly.

Author: Ewen Cheslack-Postava 

Reviewers: Jason Gustafson, Gwen Shapira

Closes #321 from ewencp/kafka-2371-distributed-herder


Project: http://git-wip-us.apache.org/repos/asf/kafka/repo
Commit: http://git-wip-us.apache.org/repos/asf/kafka/commit/2e617735
Tree: http://git-wip-us.apache.org/repos/asf/kafka/tree/2e617735
Diff: http://git-wip-us.apache.org/repos/asf/kafka/diff/2e617735

Branch: refs/heads/trunk
Commit: 2e61773590c0ba86cb8813e6ba17bf6ee33f4461
Parents: 21443f2
Author: Ewen Cheslack-Postava 
Authored: Fri Oct 23 16:37:30 2015 -0700
Committer: Gwen Shapira 
Committed: Fri Oct 23 16:37:30 2015 -0700

--
 build.gradle|   1 +
 checkstyle/import-control.xml   |   1 +
 .../clients/consumer/RoundRobinAssignor.java|  35 +-
 .../consumer/internals/AbstractCoordinator.java |   8 +-
 .../kafka/common/utils/CircularIterator.java|  54 ++
 config/copycat-distributed.properties   |   2 +
 .../kafka/copycat/file/FileStreamSinkTask.java  |  12 +-
 .../copycat/file/FileStreamSourceTask.java  |  17 +-
 .../kafka/copycat/cli/CopycatDistributed.java   |   7 +-
 .../kafka/copycat/runtime/ConnectorConfig.java  |   2 +-
 .../kafka/copycat/runtime/TaskConfig.java   |  54 ++
 .../apache/kafka/copycat/runtime/Worker.java| 145 +++-
 .../runtime/distributed/ClusterConfigState.java |  40 +-
 .../runtime/distributed/CopycatProtocol.java| 246 +++
 .../runtime/distributed/DistributedHerder.java  | 733 +--
 .../distributed/DistributedHerderConfig.java| 192 +
 .../runtime/distributed/NotLeaderException.java |  38 +
 .../runtime/distributed/WorkerCoordinator.java  | 288 
 .../runtime/distributed/WorkerGroupMember.java  | 184 +
 .../distributed/WorkerRebalanceListener.java|  38 +
 .../runtime/standalone/StandaloneHerder.java| 168 ++---
 .../copycat/storage/KafkaConfigStorage.java |  64 +-
 .../storage/KafkaOffsetBackingStore.java|   2 +
 .../kafka/copycat/util/ConnectorTaskId.java |  10 +-
 .../kafka/copycat/runtime/WorkerTest.java   | 199 -
 .../distributed/DistributedHerderTest.java  | 436 ++-
 .../distributed/WorkerCoordinatorTest.java  | 436 +++
 .../standalone/StandaloneHerderTest.java|  45 +-
 .../copycat/storage/KafkaConfigStorageTest.java |  49 +-
 .../apache/kafka/copycat/util/TestFuture.java   |  10 +-
 tests/kafkatest/services/copycat.py |  67 +-
 .../kafkatest/tests/copycat_distributed_test.py |  67 +-
 tests/kafkatest/tests/copycat_test.py   |   5 +-
 .../templates/copycat-distributed.properties|   7 +-
 34 files changed, 2966 insertions(+), 696 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/kafka/blob/2e617735/build.gradle
--
diff --git a/build.gradle b/build.gradle
index 16fb981..128c195 100644
--- a/build.gradle
+++ b/build.gradle
@@ -754,6 +754,7 @@ project(':copycat:runtime') {
 testCompile "$easymock"
 testCompile "$powermock"
 testCompile "$powermock_easymock"
+testCompile project(':clients').sourceSets.test.output
 testRuntime "$slf4jlog4j"
 testRuntime project(":copycat:json")
   }

http://git-wip-us.apache.org/repos/asf/kafka/blob/2e617735/checkstyle/import-control.xml
--
diff --git a/checkstyle/import-control.xml b/checkstyle/import-control.xml
index 6474865..e1ea93c 100644
--- a/checkstyle/import-control.xml
+++ b/checkstyle/import-control.xml
@@ -146,6 +146,7 @@
 
 
 
+
 
 
   

http://git-wip-us.apache.org/repos/asf/kafka/blob/2e617735/clients/src/main/java/org/apache/kafka/clients/consumer/RoundRobinAssignor.java
--
diff --git 
a/clients/src/main/java/org/apache/kafka/clients/consumer/RoundRobinAssignor.java
 
b/clients/src/main/java/org/apache/kafka/clients/consumer/RoundRobinAssignor.java
index c5ea2bb..b8dc253 100644
--- 
a/clients/src/main/java/org/apache/kafka/clients/consumer/RoundRobinAssignor.java
+++ 
b/clients/src/main/java/org/apache/kafka/clients/consumer/RoundRobinAssignor.java
@@ -14,11 +14,11 @@ package org.apache.kafka.clients.consumer;
 
 import 
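A self-contained sketch of what a circular iterator buys round-robin assignment (an assumption about intent; the committed CircularIterator in the file list above wraps an arbitrary Collection): it cycles over the members forever, so assignment can simply keep pulling the next consumer:

    import java.util.Arrays;
    import java.util.Iterator;
    import java.util.List;

    public class CircularIteratorSketch<T> implements Iterator<T> {
        private final List<T> items;
        private int i = 0;

        public CircularIteratorSketch(List<T> items) {
            if (items.isEmpty())
                throw new IllegalArgumentException("list must be non-empty");
            this.items = items;
        }

        @Override
        public boolean hasNext() { return true; }       // never exhausted

        @Override
        public T next() {
            T item = items.get(i);
            i = (i + 1) % items.size();                 // wrap around
            return item;
        }

        public static void main(String[] args) {
            Iterator<String> it = new CircularIteratorSketch<>(Arrays.asList("c0", "c1"));
            for (int n = 0; n < 5; n++)
                System.out.print(it.next() + " ");      // c0 c1 c0 c1 c0
        }
    }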

[1/3] kafka git commit: KAFKA-2371: Add distributed support for Copycat.

2015-10-23 Thread gwenshap
Repository: kafka
Updated Branches:
  refs/heads/trunk 21443f214 -> 2e6177359


http://git-wip-us.apache.org/repos/asf/kafka/blob/2e617735/copycat/runtime/src/test/java/org/apache/kafka/copycat/runtime/distributed/DistributedHerderTest.java
--
diff --git 
a/copycat/runtime/src/test/java/org/apache/kafka/copycat/runtime/distributed/DistributedHerderTest.java
 
b/copycat/runtime/src/test/java/org/apache/kafka/copycat/runtime/distributed/DistributedHerderTest.java
index 0463b85..1213656 100644
--- 
a/copycat/runtime/src/test/java/org/apache/kafka/copycat/runtime/distributed/DistributedHerderTest.java
+++ 
b/copycat/runtime/src/test/java/org/apache/kafka/copycat/runtime/distributed/DistributedHerderTest.java
@@ -17,20 +17,19 @@
 
 package org.apache.kafka.copycat.runtime.distributed;
 
-import org.apache.kafka.copycat.connector.Connector;
-import org.apache.kafka.copycat.connector.Task;
+import org.apache.kafka.clients.CommonClientConfigs;
+import org.apache.kafka.copycat.connector.ConnectorContext;
 import org.apache.kafka.copycat.runtime.ConnectorConfig;
-import org.apache.kafka.copycat.runtime.HerderConnectorContext;
+import org.apache.kafka.copycat.runtime.TaskConfig;
 import org.apache.kafka.copycat.runtime.Worker;
-import org.apache.kafka.copycat.sink.SinkConnector;
-import org.apache.kafka.copycat.sink.SinkTask;
 import org.apache.kafka.copycat.source.SourceConnector;
 import org.apache.kafka.copycat.source.SourceTask;
 import org.apache.kafka.copycat.storage.KafkaConfigStorage;
 import org.apache.kafka.copycat.util.Callback;
 import org.apache.kafka.copycat.util.ConnectorTaskId;
-import org.apache.kafka.copycat.util.FutureCallback;
+import org.apache.kafka.copycat.util.TestFuture;
 import org.easymock.EasyMock;
+import org.easymock.IAnswer;
 import org.junit.Before;
 import org.junit.Test;
 import org.junit.runner.RunWith;
@@ -39,251 +38,354 @@ import org.powermock.api.easymock.annotation.Mock;
 import org.powermock.core.classloader.annotations.PowerMockIgnore;
 import org.powermock.core.classloader.annotations.PrepareForTest;
 import org.powermock.modules.junit4.PowerMockRunner;
+import org.powermock.reflect.Whitebox;
 
-import java.util.*;
-import java.util.concurrent.TimeUnit;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.TimeoutException;
 
 @RunWith(PowerMockRunner.class)
-@PrepareForTest({DistributedHerder.class})
+@PrepareForTest(DistributedHerder.class)
 @PowerMockIgnore("javax.management.*")
 public class DistributedHerderTest {
-private static final List<String> CONNECTOR_NAMES = 
Arrays.asList("source-test1", "source-test2", "sink-test3");
-private static final List<String> SOURCE_CONNECTOR_NAMES = 
Arrays.asList("source-test1", "source-test2");
-private static final List<String> SINK_CONNECTOR_NAMES = 
Arrays.asList("sink-test3");
-private static final String TOPICS_LIST_STR = "topic1,topic2";
+private static final Map<String, String> HERDER_CONFIG = new HashMap<>();
+static {
+HERDER_CONFIG.put(KafkaConfigStorage.CONFIG_TOPIC_CONFIG, 
"config-topic");
+HERDER_CONFIG.put(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, 
"localhost:9092");
+HERDER_CONFIG.put(DistributedHerderConfig.GROUP_ID_CONFIG, 
"test-copycat-group");
+}
 
-private static final Map<String, String> CONFIG_STORAGE_CONFIG = 
Collections.singletonMap(KafkaConfigStorage.CONFIG_TOPIC_CONFIG, 
"config-topic");
+private static final String CONN1 = "sourceA";
+private static final String CONN2 = "sourceA";
+private static final ConnectorTaskId TASK0 = new ConnectorTaskId(CONN1, 0);
+private static final ConnectorTaskId TASK1 = new ConnectorTaskId(CONN1, 1);
+private static final ConnectorTaskId TASK2 = new ConnectorTaskId(CONN1, 2);
+private static final Integer MAX_TASKS = 3;
+private static final Map<String, String> CONNECTOR_CONFIG = new 
HashMap<>();
+static {
+CONNECTOR_CONFIG.put(ConnectorConfig.NAME_CONFIG, "sourceA");
+CONNECTOR_CONFIG.put(ConnectorConfig.TASKS_MAX_CONFIG, 
MAX_TASKS.toString());
+CONNECTOR_CONFIG.put(ConnectorConfig.TOPICS_CONFIG, "foo,bar");
+CONNECTOR_CONFIG.put(ConnectorConfig.CONNECTOR_CLASS_CONFIG, 
BogusSourceConnector.class.getName());
+}
+private static final Map<String, String> TASK_CONFIG = new HashMap<>();
+static {
+TASK_CONFIG.put(TaskConfig.TASK_CLASS_CONFIG, 
BogusSourceTask.class.getName());
+}
+private static final HashMap<ConnectorTaskId, Map<String, String>> 
TASK_CONFIGS = new HashMap<>();
+static {
+TASK_CONFIGS.put(TASK0, TASK_CONFIG);
+TASK_CONFIGS.put(TASK1, TASK_CONFIG);
+TASK_CONFIGS.put(TASK2, TASK_CONFIG);
+}
+private static final ClusterConfigState SNAPSHOT = new 
ClusterConfigState(1, 

[2/3] kafka git commit: KAFKA-2371: Add distributed support for Copycat.

2015-10-23 Thread gwenshap
http://git-wip-us.apache.org/repos/asf/kafka/blob/2e617735/copycat/runtime/src/main/java/org/apache/kafka/copycat/runtime/distributed/DistributedHerderConfig.java
--
diff --git 
a/copycat/runtime/src/main/java/org/apache/kafka/copycat/runtime/distributed/DistributedHerderConfig.java
 
b/copycat/runtime/src/main/java/org/apache/kafka/copycat/runtime/distributed/DistributedHerderConfig.java
new file mode 100644
index 000..bd2ba56
--- /dev/null
+++ 
b/copycat/runtime/src/main/java/org/apache/kafka/copycat/runtime/distributed/DistributedHerderConfig.java
@@ -0,0 +1,192 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ **/
+
+package org.apache.kafka.copycat.runtime.distributed;
+
+import org.apache.kafka.clients.CommonClientConfigs;
+import org.apache.kafka.common.config.AbstractConfig;
+import org.apache.kafka.common.config.ConfigDef;
+import org.apache.kafka.common.config.SSLConfigs;
+import org.apache.kafka.common.config.SaslConfigs;
+
+import java.util.Map;
+
+import static org.apache.kafka.common.config.ConfigDef.Range.atLeast;
+
+public class DistributedHerderConfig extends AbstractConfig {
+private static final ConfigDef CONFIG;
+
+/*
+ * NOTE: DO NOT CHANGE EITHER CONFIG STRINGS OR THEIR JAVA VARIABLE NAMES 
AS
+ * THESE ARE PART OF THE PUBLIC API AND CHANGE WILL BREAK USER CODE.
+ */
+
+/**
+ * group.id
+ */
+public static final String GROUP_ID_CONFIG = "group.id";
+private static final String GROUP_ID_DOC = "A unique string that 
identifies the Copycat cluster group this worker belongs to.";
+
+/**
+ * session.timeout.ms
+ */
+public static final String SESSION_TIMEOUT_MS_CONFIG = 
"session.timeout.ms";
+private static final String SESSION_TIMEOUT_MS_DOC = "The timeout used to 
detect failures when using Kafka's group management facilities.";
+
+/**
+ * heartbeat.interval.ms
+ */
+public static final String HEARTBEAT_INTERVAL_MS_CONFIG = 
"heartbeat.interval.ms";
+private static final String HEARTBEAT_INTERVAL_MS_DOC = "The expected time 
between heartbeats to the group coordinator when using Kafka's group management 
facilities. Heartbeats are used to ensure that the worker's session stays 
active and to facilitate rebalancing when new members join or leave the group. 
The value must be set lower than session.timeout.ms, but typically 
should be set no higher than 1/3 of that value. It can be adjusted even lower 
to control the expected time for normal rebalances.";
+
+/**
+ * worker.sync.timeout.ms
+ */
+public static final String WORKER_SYNC_TIMEOUT_MS_CONFIG = 
"worker.sync.timeout.ms";
+private static final String WORKER_SYNC_TIMEOUT_MS_DOC = "When the worker 
is out of sync with other workers and needs" +
+" to resynchronize configurations, wait up to this amount of time 
before giving up, leaving the group, and" +
+" waiting a backoff period before rejoining.";
+
+/**
+ * group.unsync.timeout.ms
+ */
+public static final String WORKER_UNSYNC_BACKOFF_MS_CONFIG = 
"worker.unsync.backoff.ms";
+private static final String WORKER_UNSYNC_BACKOFF_MS_DOC = "When the 
worker is out of sync with other workers and " +
+" fails to catch up within worker.sync.timeout.ms, leave the 
Copycat cluster for this long before rejoining.";
+public static final int WORKER_UNSYNC_BACKOFF_MS_DEFAULT = 5 * 60 * 1000;
+
+static {
+CONFIG = new ConfigDef()
+.define(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG,
+ConfigDef.Type.LIST,
+ConfigDef.Importance.HIGH,
+CommonClientConfigs.BOOSTRAP_SERVERS_DOC)
+.define(GROUP_ID_CONFIG, ConfigDef.Type.STRING, 
ConfigDef.Importance.HIGH, GROUP_ID_DOC)
+.define(SESSION_TIMEOUT_MS_CONFIG,
+ConfigDef.Type.INT,
+30000,
+ConfigDef.Importance.HIGH,
+SESSION_TIMEOUT_MS_DOC)
+.define(HEARTBEAT_INTERVAL_MS_CONFIG,
+ConfigDef.Type.INT,
+   
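As a sketch of how these definitions get used (assumption: DistributedHerderConfig exposes the usual AbstractConfig-style Map constructor), the minimal worker settings the definitions above require:

    import java.util.HashMap;
    import java.util.Map;
    import org.apache.kafka.copycat.runtime.distributed.DistributedHerderConfig;

    public class HerderConfigSketch {
        public static void main(String[] args) {
            Map<String, String> workerProps = new HashMap<>();
            workerProps.put("bootstrap.servers", "localhost:9092");
            workerProps.put("group.id", "copycat-cluster-1");   // required: names the cluster group
            workerProps.put("session.timeout.ms", "30000");     // failure detection window
            workerProps.put("heartbeat.interval.ms", "3000");   // typically <= 1/3 of the session timeout
            DistributedHerderConfig config = new DistributedHerderConfig(workerProps);
            System.out.println(config.getInt("session.timeout.ms"));
        }
    }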

kafka git commit: KAFKA-2797: Only run rat when in the .git repository since it requires the .gitignore to generate the list of files to ignore

2015-11-10 Thread gwenshap
Repository: kafka
Updated Branches:
  refs/heads/trunk 590a4616a -> fe11488a7


KAFKA-2797: Only run rat when in the .git repository since it requires the 
.gitignore to generate the list of files to ignore

Author: Ewen Cheslack-Postava 

Reviewers: Gwen Shapira

Closes #485 from ewencp/kafka-2797-disable-rat-when-git-missing


Project: http://git-wip-us.apache.org/repos/asf/kafka/repo
Commit: http://git-wip-us.apache.org/repos/asf/kafka/commit/fe11488a
Tree: http://git-wip-us.apache.org/repos/asf/kafka/tree/fe11488a
Diff: http://git-wip-us.apache.org/repos/asf/kafka/diff/fe11488a

Branch: refs/heads/trunk
Commit: fe11488a7787732b6cc2ba306f745ea1f318208c
Parents: 590a461
Author: Ewen Cheslack-Postava 
Authored: Tue Nov 10 10:17:13 2015 -0800
Committer: Gwen Shapira 
Committed: Tue Nov 10 10:17:13 2015 -0800

--
 build.gradle | 36 +++-
 1 file changed, 19 insertions(+), 17 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/kafka/blob/fe11488a/build.gradle
--
diff --git a/build.gradle b/build.gradle
index 79fb31d..7f21a00 100644
--- a/build.gradle
+++ b/build.gradle
@@ -60,23 +60,25 @@ ext {
 
 apply from: file('wrapper.gradle')
 apply from: file('scala.gradle')
-apply from: file('gradle/rat.gradle')
-
-rat {
-  // Exclude everything under the directory that git should be ignoring via 
.gitignore or that isn't checked in. These
-  // restrict us only to files that are checked in or are staged.
-  def repo = Grgit.open(project.file('.'))
-  excludes = new ArrayList(repo.clean(ignore: false, directories: 
true, dryRun: true))
-  // And some of the files that we have checked in should also be excluded 
from this check
-  excludes.addAll([
-'**/.git/**',
-'**/build/**',
-'CONTRIBUTING.md',
-'gradlew',
-'gradlew.bat',
-'**/README.md',
-'.reviewboardrc'
-  ])
+
+if (new File('.git').exists()) {
+  apply from: file('gradle/rat.gradle')
+  rat {
+// Exclude everything under the directory that git should be ignoring via 
.gitignore or that isn't checked in. These
+// restrict us only to files that are checked in or are staged.
+def repo = Grgit.open(project.file('.'))
+excludes = new ArrayList(repo.clean(ignore: false, directories: 
true, dryRun: true))
+// And some of the files that we have checked in should also be excluded 
from this check
+excludes.addAll([
+'**/.git/**',
+'**/build/**',
+'CONTRIBUTING.md',
+'gradlew',
+'gradlew.bat',
+'**/README.md',
+'.reviewboardrc'
+])
+  }
 }
 
 subprojects {



kafka git commit: KAFKA-2797: Only run rat when in the .git repository since it requires the .gitignore to generate the list of files to ignore

2015-11-10 Thread gwenshap
Repository: kafka
Updated Branches:
  refs/heads/0.9.0 a621677be -> 528d555fc


KAFKA-2797: Only run rat when in the .git repository since it requires the 
.gitignore to generate the list of files to ignore

Author: Ewen Cheslack-Postava 

Reviewers: Gwen Shapira

Closes #485 from ewencp/kafka-2797-disable-rat-when-git-missing

(cherry picked from commit fe11488a7787732b6cc2ba306f745ea1f318208c)
Signed-off-by: Gwen Shapira 


Project: http://git-wip-us.apache.org/repos/asf/kafka/repo
Commit: http://git-wip-us.apache.org/repos/asf/kafka/commit/528d555f
Tree: http://git-wip-us.apache.org/repos/asf/kafka/tree/528d555f
Diff: http://git-wip-us.apache.org/repos/asf/kafka/diff/528d555f

Branch: refs/heads/0.9.0
Commit: 528d555fc39b93a52e1781bdba00db96d8554350
Parents: a621677
Author: Ewen Cheslack-Postava 
Authored: Tue Nov 10 10:17:13 2015 -0800
Committer: Gwen Shapira 
Committed: Tue Nov 10 10:17:28 2015 -0800

--
 build.gradle | 36 +++-
 1 file changed, 19 insertions(+), 17 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/kafka/blob/528d555f/build.gradle
--
diff --git a/build.gradle b/build.gradle
index c694c59..d99eeec 100644
--- a/build.gradle
+++ b/build.gradle
@@ -60,23 +60,25 @@ ext {
 
 apply from: file('wrapper.gradle')
 apply from: file('scala.gradle')
-apply from: file('gradle/rat.gradle')
-
-rat {
-  // Exclude everything under the directory that git should be ignoring via 
.gitignore or that isn't checked in. These
-  // restrict us only to files that are checked in or are staged.
-  def repo = Grgit.open(project.file('.'))
-  excludes = new ArrayList(repo.clean(ignore: false, directories: 
true, dryRun: true))
-  // And some of the files that we have checked in should also be excluded 
from this check
-  excludes.addAll([
-'**/.git/**',
-'**/build/**',
-'CONTRIBUTING.md',
-'gradlew',
-'gradlew.bat',
-'**/README.md',
-'.reviewboardrc'
-  ])
+
+if (new File('.git').exists()) {
+  apply from: file('gradle/rat.gradle')
+  rat {
+// Exclude everything under the directory that git should be ignoring via 
.gitignore or that isn't checked in. These
+// restrict us only to files that are checked in or are staged.
+def repo = Grgit.open(project.file('.'))
+excludes = new ArrayList(repo.clean(ignore: false, directories: 
true, dryRun: true))
+// And some of the files that we have checked in should also be excluded 
from this check
+excludes.addAll([
+'**/.git/**',
+'**/build/**',
+'CONTRIBUTING.md',
+'gradlew',
+'gradlew.bat',
+'**/README.md',
+'.reviewboardrc'
+])
+  }
 }
 
 subprojects {



kafka git commit: KAFKA-2792: Don't wait for a response to the leave group message when closing the new consumer.

2015-11-10 Thread gwenshap
Repository: kafka
Updated Branches:
  refs/heads/0.9.0 528d555fc -> 69f2ad8e2


KAFKA-2792: Don't wait for a response to the leave group message when closing 
the new consumer.

Author: Ewen Cheslack-Postava 

Reviewers: Onur Karaman, Gwen Shapira

Closes #480 from ewencp/kafka-2792-fix-blocking-consumer-close

(cherry picked from commit ae5a5d7c08bb634576a414f6f2864c5b8a7e58a3)
Signed-off-by: Gwen Shapira 


Project: http://git-wip-us.apache.org/repos/asf/kafka/repo
Commit: http://git-wip-us.apache.org/repos/asf/kafka/commit/69f2ad8e
Tree: http://git-wip-us.apache.org/repos/asf/kafka/tree/69f2ad8e
Diff: http://git-wip-us.apache.org/repos/asf/kafka/diff/69f2ad8e

Branch: refs/heads/0.9.0
Commit: 69f2ad8e22e7183946e076a8cda122aa0964fd9c
Parents: 528d555
Author: Ewen Cheslack-Postava 
Authored: Tue Nov 10 10:26:51 2015 -0800
Committer: Gwen Shapira 
Committed: Tue Nov 10 10:27:04 2015 -0800

--
 .../apache/kafka/clients/consumer/KafkaConsumer.java   |  2 +-
 .../consumer/internals/AbstractCoordinator.java| 13 +
 .../consumer/internals/ConsumerCoordinatorTest.java|  4 ++--
 3 files changed, 8 insertions(+), 11 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/kafka/blob/69f2ad8e/clients/src/main/java/org/apache/kafka/clients/consumer/KafkaConsumer.java
--
diff --git 
a/clients/src/main/java/org/apache/kafka/clients/consumer/KafkaConsumer.java 
b/clients/src/main/java/org/apache/kafka/clients/consumer/KafkaConsumer.java
index d3616f9..89b2f0b 100644
--- a/clients/src/main/java/org/apache/kafka/clients/consumer/KafkaConsumer.java
+++ b/clients/src/main/java/org/apache/kafka/clients/consumer/KafkaConsumer.java
@@ -760,7 +760,7 @@ public class KafkaConsumer<K, V> implements Consumer<K, V> {
 try {
 log.debug("Unsubscribed all topics or patterns and assigned 
partitions");
 this.subscriptions.unsubscribe();
-this.coordinator.maybeLeaveGroup(false);
+this.coordinator.maybeLeaveGroup();
 this.metadata.needMetadataForAllTopics(false);
 } finally {
 release();

http://git-wip-us.apache.org/repos/asf/kafka/blob/69f2ad8e/clients/src/main/java/org/apache/kafka/clients/consumer/internals/AbstractCoordinator.java
--
diff --git 
a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/AbstractCoordinator.java
 
b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/AbstractCoordinator.java
index 5b5c8a5..a12c6c1 100644
--- 
a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/AbstractCoordinator.java
+++ 
b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/AbstractCoordinator.java
@@ -540,18 +540,18 @@ public abstract class AbstractCoordinator implements 
Closeable {
 @Override
 public void close() {
 client.disableWakeups();
-maybeLeaveGroup(true);
+maybeLeaveGroup();
 }
 
 /**
  * Leave the current group and reset local generation/memberId.
  */
-public void maybeLeaveGroup(boolean awaitResponse) {
+public void maybeLeaveGroup() {
 client.unschedule(heartbeatTask);
 if (!coordinatorUnknown() && generation > 0) {
 // this is a minimal effort attempt to leave the group. we do not
 // attempt any resending if the request fails or times out.
-sendLeaveGroupRequest(awaitResponse);
+sendLeaveGroupRequest();
 }
 
 this.generation = OffsetCommitRequest.DEFAULT_GENERATION_ID;
@@ -559,7 +559,7 @@ public abstract class AbstractCoordinator implements 
Closeable {
 rejoinNeeded = true;
 }
 
-private void sendLeaveGroupRequest(boolean awaitResponse) {
+private void sendLeaveGroupRequest() {
 LeaveGroupRequest request = new LeaveGroupRequest(groupId, memberId);
RequestFuture<Void> future = client.send(coordinator, 
ApiKeys.LEAVE_GROUP, request)
 .compose(new LeaveGroupResponseHandler());
@@ -574,10 +574,7 @@ public abstract class AbstractCoordinator implements 
Closeable {
 }
 });
 
-if (awaitResponse)
-client.poll(future);
-else
-client.poll(future, 0);
+client.poll(future, 0);
 }
 
 private class LeaveGroupResponseHandler extends 
CoordinatorResponseHandler<LeaveGroupResponse, Void> {

http://git-wip-us.apache.org/repos/asf/kafka/blob/69f2ad8e/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerCoordinatorTest.java
--
diff --git 

kafka git commit: KAFKA-2792: Don't wait for a response to the leave group message when closing the new consumer.

2015-11-10 Thread gwenshap
Repository: kafka
Updated Branches:
  refs/heads/trunk fe11488a7 -> ae5a5d7c0


KAFKA-2792: Don't wait for a response to the leave group message when closing 
the new consumer.

Author: Ewen Cheslack-Postava 

Reviewers: Onur Karaman, Gwen Shapira

Closes #480 from ewencp/kafka-2792-fix-blocking-consumer-close


Project: http://git-wip-us.apache.org/repos/asf/kafka/repo
Commit: http://git-wip-us.apache.org/repos/asf/kafka/commit/ae5a5d7c
Tree: http://git-wip-us.apache.org/repos/asf/kafka/tree/ae5a5d7c
Diff: http://git-wip-us.apache.org/repos/asf/kafka/diff/ae5a5d7c

Branch: refs/heads/trunk
Commit: ae5a5d7c08bb634576a414f6f2864c5b8a7e58a3
Parents: fe11488
Author: Ewen Cheslack-Postava 
Authored: Tue Nov 10 10:26:51 2015 -0800
Committer: Gwen Shapira 
Committed: Tue Nov 10 10:26:51 2015 -0800

--
 .../apache/kafka/clients/consumer/KafkaConsumer.java   |  2 +-
 .../consumer/internals/AbstractCoordinator.java| 13 +
 .../consumer/internals/ConsumerCoordinatorTest.java|  4 ++--
 3 files changed, 8 insertions(+), 11 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/kafka/blob/ae5a5d7c/clients/src/main/java/org/apache/kafka/clients/consumer/KafkaConsumer.java
--
diff --git 
a/clients/src/main/java/org/apache/kafka/clients/consumer/KafkaConsumer.java 
b/clients/src/main/java/org/apache/kafka/clients/consumer/KafkaConsumer.java
index d3616f9..89b2f0b 100644
--- a/clients/src/main/java/org/apache/kafka/clients/consumer/KafkaConsumer.java
+++ b/clients/src/main/java/org/apache/kafka/clients/consumer/KafkaConsumer.java
@@ -760,7 +760,7 @@ public class KafkaConsumer<K, V> implements Consumer<K, V> {
 try {
 log.debug("Unsubscribed all topics or patterns and assigned 
partitions");
 this.subscriptions.unsubscribe();
-this.coordinator.maybeLeaveGroup(false);
+this.coordinator.maybeLeaveGroup();
 this.metadata.needMetadataForAllTopics(false);
 } finally {
 release();

http://git-wip-us.apache.org/repos/asf/kafka/blob/ae5a5d7c/clients/src/main/java/org/apache/kafka/clients/consumer/internals/AbstractCoordinator.java
--
diff --git 
a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/AbstractCoordinator.java
 
b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/AbstractCoordinator.java
index 5b5c8a5..a12c6c1 100644
--- 
a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/AbstractCoordinator.java
+++ 
b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/AbstractCoordinator.java
@@ -540,18 +540,18 @@ public abstract class AbstractCoordinator implements 
Closeable {
 @Override
 public void close() {
 client.disableWakeups();
-maybeLeaveGroup(true);
+maybeLeaveGroup();
 }
 
 /**
  * Leave the current group and reset local generation/memberId.
  */
-public void maybeLeaveGroup(boolean awaitResponse) {
+public void maybeLeaveGroup() {
 client.unschedule(heartbeatTask);
 if (!coordinatorUnknown() && generation > 0) {
 // this is a minimal effort attempt to leave the group. we do not
 // attempt any resending if the request fails or times out.
-sendLeaveGroupRequest(awaitResponse);
+sendLeaveGroupRequest();
 }
 
 this.generation = OffsetCommitRequest.DEFAULT_GENERATION_ID;
@@ -559,7 +559,7 @@ public abstract class AbstractCoordinator implements 
Closeable {
 rejoinNeeded = true;
 }
 
-private void sendLeaveGroupRequest(boolean awaitResponse) {
+private void sendLeaveGroupRequest() {
 LeaveGroupRequest request = new LeaveGroupRequest(groupId, memberId);
RequestFuture<Void> future = client.send(coordinator, 
ApiKeys.LEAVE_GROUP, request)
 .compose(new LeaveGroupResponseHandler());
@@ -574,10 +574,7 @@ public abstract class AbstractCoordinator implements 
Closeable {
 }
 });
 
-if (awaitResponse)
-client.poll(future);
-else
-client.poll(future, 0);
+client.poll(future, 0);
 }
 
 private class LeaveGroupResponseHandler extends 
CoordinatorResponseHandler<LeaveGroupResponse, Void> {
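From the application's side the visible effect is that close() no longer blocks on the coordinator; a minimal sketch (assumptions: a reachable broker and the usual 0.9 consumer settings):

    import java.util.Collections;
    import java.util.Properties;
    import org.apache.kafka.clients.consumer.KafkaConsumer;

    public class NonBlockingCloseSketch {
        public static void main(String[] args) {
            Properties props = new Properties();
            props.put("bootstrap.servers", "localhost:9092");
            props.put("group.id", "example-group");
            props.put("key.deserializer", "org.apache.kafka.common.serialization.ByteArrayDeserializer");
            props.put("value.deserializer", "org.apache.kafka.common.serialization.ByteArrayDeserializer");

            KafkaConsumer<byte[], byte[]> consumer = new KafkaConsumer<>(props);
            consumer.subscribe(Collections.singletonList("example-topic"));
            consumer.poll(100);
            // Sends the LeaveGroup request best-effort and returns without
            // waiting for the response (the zero-timeout poll shown above).
            consumer.close();
        }
    }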

http://git-wip-us.apache.org/repos/asf/kafka/blob/ae5a5d7c/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerCoordinatorTest.java
--
diff --git 
a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerCoordinatorTest.java
 

kafka git commit: KAFKA-2798: Use prefixed configurations for Kafka Connect producer and consumer settings so they do not conflict with the distributed herder's settings.

2015-11-10 Thread gwenshap
Repository: kafka
Updated Branches:
  refs/heads/trunk ae5a5d7c0 -> 403d89ede


KAFKA-2798: Use prefixed configurations for Kafka Connect producer and 
consumer settings so they do not conflict with the distributed herder's 
settings.

Author: Ewen Cheslack-Postava 

Reviewers: Gwen Shapira

Closes #486 from ewencp/kafka-2798-conflicting-herder-producer-consumer-configs


Project: http://git-wip-us.apache.org/repos/asf/kafka/repo
Commit: http://git-wip-us.apache.org/repos/asf/kafka/commit/403d89ed
Tree: http://git-wip-us.apache.org/repos/asf/kafka/tree/403d89ed
Diff: http://git-wip-us.apache.org/repos/asf/kafka/diff/403d89ed

Branch: refs/heads/trunk
Commit: 403d89edeaa7808f71c0e7318411c925895210f2
Parents: ae5a5d7
Author: Ewen Cheslack-Postava 
Authored: Tue Nov 10 11:07:26 2015 -0800
Committer: Gwen Shapira 
Committed: Tue Nov 10 11:07:26 2015 -0800

--
 .../java/org/apache/kafka/common/config/AbstractConfig.java  | 8 
 .../main/java/org/apache/kafka/connect/runtime/Worker.java   | 2 +-
 .../org/apache/kafka/connect/runtime/WorkerSinkTask.java | 5 -
 3 files changed, 5 insertions(+), 10 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/kafka/blob/403d89ed/clients/src/main/java/org/apache/kafka/common/config/AbstractConfig.java
--
diff --git 
a/clients/src/main/java/org/apache/kafka/common/config/AbstractConfig.java 
b/clients/src/main/java/org/apache/kafka/common/config/AbstractConfig.java
index 07b64c0..1029356 100644
--- a/clients/src/main/java/org/apache/kafka/common/config/AbstractConfig.java
+++ b/clients/src/main/java/org/apache/kafka/common/config/AbstractConfig.java
@@ -105,14 +105,6 @@ public class AbstractConfig {
 return keys;
 }
 
-public Map<String, ?> unusedConfigs() {
-Set<String> unusedKeys = this.unused();
-Map<String, Object> unusedProps = new HashMap<>();
-for (String key : unusedKeys)
-unusedProps.put(key, this.originals.get(key));
-return unusedProps;
-}
-
 public Map<String, Object> originals() {
 Map<String, Object> copy = new RecordingMap<>();
 copy.putAll(originals);

http://git-wip-us.apache.org/repos/asf/kafka/blob/403d89ed/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/Worker.java
--
diff --git 
a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/Worker.java 
b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/Worker.java
index 359a79c..f5b23ec 100644
--- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/Worker.java
+++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/Worker.java
@@ -101,7 +101,7 @@ public class Worker {
 producerProps.put(ProducerConfig.ACKS_CONFIG, "all");
 
producerProps.put(ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION, "1");
 
-producerProps.putAll(config.unusedConfigs());
+producerProps.putAll(config.originalsWithPrefix("producer."));
 
 producer = new KafkaProducer<>(producerProps);
 

http://git-wip-us.apache.org/repos/asf/kafka/blob/403d89ed/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/WorkerSinkTask.java
--
diff --git 
a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/WorkerSinkTask.java
 
b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/WorkerSinkTask.java
index 643b10e..e0a3e04 100644
--- 
a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/WorkerSinkTask.java
+++ 
b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/WorkerSinkTask.java
@@ -233,7 +233,8 @@ class WorkerSinkTask implements WorkerTask {
 private KafkaConsumer<byte[], byte[]> createConsumer() {
 // Include any unknown worker configs so consumer configs can be set 
globally on the worker
 // and through to the task
-Map<String, Object> props = workerConfig.unusedConfigs();
+Map<String, Object> props = new HashMap<>();
+
 props.put(ConsumerConfig.GROUP_ID_CONFIG, "connect-" + id.connector());
 props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG,
 
Utils.join(workerConfig.getList(WorkerConfig.BOOTSTRAP_SERVERS_CONFIG), ","));
@@ -242,6 +243,8 @@ class WorkerSinkTask implements WorkerTask {
 props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, 
"org.apache.kafka.common.serialization.ByteArrayDeserializer");
 props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, 
"org.apache.kafka.common.serialization.ByteArrayDeserializer");
 
+props.putAll(workerConfig.originalsWithPrefix("consumer."));
+
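A runnable sketch of the new prefix behavior using AbstractConfig directly (the worker config classes build on it); the keys are illustrative:

    import java.util.HashMap;
    import java.util.Map;
    import org.apache.kafka.common.config.AbstractConfig;
    import org.apache.kafka.common.config.ConfigDef;

    public class PrefixedConfigSketch {
        public static void main(String[] args) {
            Map<String, String> workerProps = new HashMap<>();
            workerProps.put("bootstrap.servers", "localhost:9092");            // worker-level setting
            workerProps.put("producer.compression.type", "snappy");            // producer override
            workerProps.put("consumer.max.partition.fetch.bytes", "5242880");  // sink consumer override

            ConfigDef def = new ConfigDef().define("bootstrap.servers",
                    ConfigDef.Type.LIST, ConfigDef.Importance.HIGH, "broker list");
            AbstractConfig config = new AbstractConfig(def, workerProps);

            // Prints {compression.type=snappy}: the prefix is stripped, and only
            // explicitly prefixed keys now reach the embedded producer.
            System.out.println(config.originalsWithPrefix("producer."));
        }
    }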
 

kafka git commit: KAFKA-2798: Use prefixed configurations for Kafka Connect producer and consumer settings so they do not conflict with the distributed herder's settings.

2015-11-10 Thread gwenshap
Repository: kafka
Updated Branches:
  refs/heads/0.9.0 69f2ad8e2 -> dd8a870e4


KAFKA-2798: Use prefixed configurations for Kafka Connect producer and 
consumer settings so they do not conflict with the distributed herder's 
settings.

Author: Ewen Cheslack-Postava 

Reviewers: Gwen Shapira

Closes #486 from ewencp/kafka-2798-conflicting-herder-producer-consumer-configs

(cherry picked from commit 403d89edeaa7808f71c0e7318411c925895210f2)
Signed-off-by: Gwen Shapira 


Project: http://git-wip-us.apache.org/repos/asf/kafka/repo
Commit: http://git-wip-us.apache.org/repos/asf/kafka/commit/dd8a870e
Tree: http://git-wip-us.apache.org/repos/asf/kafka/tree/dd8a870e
Diff: http://git-wip-us.apache.org/repos/asf/kafka/diff/dd8a870e

Branch: refs/heads/0.9.0
Commit: dd8a870e4df11cad009498ebfb9b6eac78f1652f
Parents: 69f2ad8
Author: Ewen Cheslack-Postava 
Authored: Tue Nov 10 11:07:26 2015 -0800
Committer: Gwen Shapira 
Committed: Tue Nov 10 11:07:40 2015 -0800

--
 .../java/org/apache/kafka/common/config/AbstractConfig.java  | 8 
 .../main/java/org/apache/kafka/connect/runtime/Worker.java   | 2 +-
 .../org/apache/kafka/connect/runtime/WorkerSinkTask.java | 5 -
 3 files changed, 5 insertions(+), 10 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/kafka/blob/dd8a870e/clients/src/main/java/org/apache/kafka/common/config/AbstractConfig.java
--
diff --git 
a/clients/src/main/java/org/apache/kafka/common/config/AbstractConfig.java 
b/clients/src/main/java/org/apache/kafka/common/config/AbstractConfig.java
index 07b64c0..1029356 100644
--- a/clients/src/main/java/org/apache/kafka/common/config/AbstractConfig.java
+++ b/clients/src/main/java/org/apache/kafka/common/config/AbstractConfig.java
@@ -105,14 +105,6 @@ public class AbstractConfig {
 return keys;
 }
 
-public Map<String, ?> unusedConfigs() {
-Set<String> unusedKeys = this.unused();
-Map<String, Object> unusedProps = new HashMap<>();
-for (String key : unusedKeys)
-unusedProps.put(key, this.originals.get(key));
-return unusedProps;
-}
-
 public Map<String, Object> originals() {
 Map<String, Object> copy = new RecordingMap<>();
 copy.putAll(originals);

http://git-wip-us.apache.org/repos/asf/kafka/blob/dd8a870e/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/Worker.java
--
diff --git 
a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/Worker.java 
b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/Worker.java
index 359a79c..f5b23ec 100644
--- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/Worker.java
+++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/Worker.java
@@ -101,7 +101,7 @@ public class Worker {
 producerProps.put(ProducerConfig.ACKS_CONFIG, "all");
 
producerProps.put(ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION, "1");
 
-producerProps.putAll(config.unusedConfigs());
+producerProps.putAll(config.originalsWithPrefix("producer."));
 
 producer = new KafkaProducer<>(producerProps);
 

http://git-wip-us.apache.org/repos/asf/kafka/blob/dd8a870e/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/WorkerSinkTask.java
--
diff --git 
a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/WorkerSinkTask.java
 
b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/WorkerSinkTask.java
index 643b10e..e0a3e04 100644
--- 
a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/WorkerSinkTask.java
+++ 
b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/WorkerSinkTask.java
@@ -233,7 +233,8 @@ class WorkerSinkTask implements WorkerTask {
 private KafkaConsumer<byte[], byte[]> createConsumer() {
 // Include any unknown worker configs so consumer configs can be set 
globally on the worker
 // and through to the task
-Map<String, Object> props = workerConfig.unusedConfigs();
+Map<String, Object> props = new HashMap<>();
+
 props.put(ConsumerConfig.GROUP_ID_CONFIG, "connect-" + id.connector());
 props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG,
 
Utils.join(workerConfig.getList(WorkerConfig.BOOTSTRAP_SERVERS_CONFIG), ","));
@@ -242,6 +243,8 @@ class WorkerSinkTask implements WorkerTask {
 props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, 
"org.apache.kafka.common.serialization.ByteArrayDeserializer");
 props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, 

kafka git commit: KAFKA-2793: Use ByteArrayDeserializer instead of StringDeserializer for keys in ConsoleConsumer with new consumer.

2015-11-10 Thread gwenshap
Repository: kafka
Updated Branches:
  refs/heads/trunk 60c06734b -> 64a1bfeb9


KAFKA-2793: Use ByteArrayDeserializer instead of StringDeserializer for keys in 
ConsoleConsumer with new consumer.

Author: Ewen Cheslack-Postava 

Reviewers: Gwen Shapira

Closes #482 from ewencp/kafka-2793-console-consumer-new-consumer-deserializer


Project: http://git-wip-us.apache.org/repos/asf/kafka/repo
Commit: http://git-wip-us.apache.org/repos/asf/kafka/commit/64a1bfeb
Tree: http://git-wip-us.apache.org/repos/asf/kafka/tree/64a1bfeb
Diff: http://git-wip-us.apache.org/repos/asf/kafka/diff/64a1bfeb

Branch: refs/heads/trunk
Commit: 64a1bfeb9b1b90c94cc62ee2587f9745c850ada3
Parents: 60c0673
Author: Ewen Cheslack-Postava 
Authored: Tue Nov 10 14:43:25 2015 -0800
Committer: Gwen Shapira 
Committed: Tue Nov 10 14:43:25 2015 -0800

--
 core/src/main/scala/kafka/tools/ConsoleConsumer.scala | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/kafka/blob/64a1bfeb/core/src/main/scala/kafka/tools/ConsoleConsumer.scala
--
diff --git a/core/src/main/scala/kafka/tools/ConsoleConsumer.scala 
b/core/src/main/scala/kafka/tools/ConsoleConsumer.scala
index 2b1a69a..0dedcd9 100755
--- a/core/src/main/scala/kafka/tools/ConsoleConsumer.scala
+++ b/core/src/main/scala/kafka/tools/ConsoleConsumer.scala
@@ -162,7 +162,7 @@ object ConsoleConsumer extends Logging {
 props.putAll(config.consumerProps)
 props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, if 
(config.options.has(config.resetBeginningOpt)) "earliest" else "latest")
 props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, config.bootstrapServer)
-props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, if 
(config.keyDeserializer != null) config.keyDeserializer else 
"org.apache.kafka.common.serialization.StringDeserializer")
+props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, if 
(config.keyDeserializer != null) config.keyDeserializer else 
"org.apache.kafka.common.serialization.ByteArrayDeserializer")
 props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, if 
(config.valueDeserializer != null) config.valueDeserializer else 
"org.apache.kafka.common.serialization.ByteArrayDeserializer")
 
 props
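Why the default matters, sketched with the two deserializers (illustrative bytes): ByteArrayDeserializer passes the raw key through for the message formatter to render, while StringDeserializer forces a UTF-8 decode that mangles binary keys:

    import org.apache.kafka.common.serialization.ByteArrayDeserializer;
    import org.apache.kafka.common.serialization.StringDeserializer;

    public class KeyDeserializerSketch {
        public static void main(String[] args) {
            byte[] rawKey = new byte[] {(byte) 0xFF, 0x00, 0x01};  // not valid UTF-8 text

            byte[] asBytes = new ByteArrayDeserializer().deserialize("t", rawKey);  // untouched
            String asText = new StringDeserializer().deserialize("t", rawKey);      // replacement chars

            System.out.println(asBytes.length + " bytes vs \"" + asText + "\"");
        }
    }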



kafka git commit: KAFKA-2793: Use ByteArrayDeserializer instead of StringDeserializer for keys in ConsoleConsumer with new consumer.

2015-11-10 Thread gwenshap
Repository: kafka
Updated Branches:
  refs/heads/0.9.0 501ac0282 -> 75db96e42


KAFKA-2793: Use ByteArrayDeserializer instead of StringDeserializer for keys in 
ConsoleConsumer with new consumer.

Author: Ewen Cheslack-Postava 

Reviewers: Gwen Shapira

Closes #482 from ewencp/kafka-2793-console-consumer-new-consumer-deserializer

(cherry picked from commit 64a1bfeb9b1b90c94cc62ee2587f9745c850ada3)
Signed-off-by: Gwen Shapira 


Project: http://git-wip-us.apache.org/repos/asf/kafka/repo
Commit: http://git-wip-us.apache.org/repos/asf/kafka/commit/75db96e4
Tree: http://git-wip-us.apache.org/repos/asf/kafka/tree/75db96e4
Diff: http://git-wip-us.apache.org/repos/asf/kafka/diff/75db96e4

Branch: refs/heads/0.9.0
Commit: 75db96e429de34373b92d8f4f5df508071b05127
Parents: 501ac02
Author: Ewen Cheslack-Postava 
Authored: Tue Nov 10 14:43:25 2015 -0800
Committer: Gwen Shapira 
Committed: Tue Nov 10 14:45:18 2015 -0800

--
 core/src/main/scala/kafka/tools/ConsoleConsumer.scala | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/kafka/blob/75db96e4/core/src/main/scala/kafka/tools/ConsoleConsumer.scala
--
diff --git a/core/src/main/scala/kafka/tools/ConsoleConsumer.scala 
b/core/src/main/scala/kafka/tools/ConsoleConsumer.scala
index 9f296bd..6504925 100755
--- a/core/src/main/scala/kafka/tools/ConsoleConsumer.scala
+++ b/core/src/main/scala/kafka/tools/ConsoleConsumer.scala
@@ -162,7 +162,7 @@ object ConsoleConsumer extends Logging {
 props.putAll(config.consumerProps)
 props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, if 
(config.options.has(config.resetBeginningOpt)) "earliest" else "latest")
 props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, config.bootstrapServer)
-props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, if 
(config.keyDeserializer != null) config.keyDeserializer else 
"org.apache.kafka.common.serialization.StringDeserializer")
+props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, if 
(config.keyDeserializer != null) config.keyDeserializer else 
"org.apache.kafka.common.serialization.ByteArrayDeserializer")
 props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, if 
(config.valueDeserializer != null) config.valueDeserializer else 
"org.apache.kafka.common.serialization.ByteArrayDeserializer")
 
 props



kafka git commit: MINOR: remove old producer in config sections to align with APIs

2015-11-09 Thread gwenshap
Repository: kafka
Updated Branches:
  refs/heads/trunk 359be3a68 -> a87b97822


MINOR: remove old producer in config sections to align with APIs

Author: Guozhang Wang 

Reviewers: Gwen Shapira

Closes #468 from guozhangwang/WikiUpdate


Project: http://git-wip-us.apache.org/repos/asf/kafka/repo
Commit: http://git-wip-us.apache.org/repos/asf/kafka/commit/a87b9782
Tree: http://git-wip-us.apache.org/repos/asf/kafka/tree/a87b9782
Diff: http://git-wip-us.apache.org/repos/asf/kafka/diff/a87b9782

Branch: refs/heads/trunk
Commit: a87b978223a827c34db1341b2da3e17e571c4464
Parents: 359be3a
Author: Guozhang Wang 
Authored: Mon Nov 9 13:36:35 2015 -0800
Committer: Gwen Shapira 
Committed: Mon Nov 9 13:36:35 2015 -0800

--
 docs/configuration.html | 158 +--
 docs/documentation.html |   7 +-
 2 files changed, 19 insertions(+), 146 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/kafka/blob/a87b9782/docs/configuration.html
--
diff --git a/docs/configuration.html b/docs/configuration.html
index e4019ba..b33cf6a 100644
--- a/docs/configuration.html
+++ b/docs/configuration.html
@@ -32,7 +32,7 @@ Topic-level configurations and defaults are discussed in more 
detail Topic-level configuration
+Topic-level configuration
 
 Configurations pertinent to topics have both a global default as well an 
optional per-topic override. If no per-topic configuration is given the global 
default is used. The override can be set at topic creation time by giving one 
or more --config options. This example creates a topic named 
my-topic with a custom max message size and flush rate:
  > bin/kafka-topics.sh --zookeeper localhost:2181 --create --topic my-topic --partitions 1 --replication-factor 1 --config max.message.bytes=64000 --config flush.messages=1
 
@@ -106,7 +106,7 @@ The following are the topic-level configurations. The 
server's default configura
   min.insync.replicas
   1
   min.insync.replicas
-  When a producer sets request.required.acks to -1, 
min.insync.replicas specifies the minimum number of replicas that must 
acknowledge a write for the write to be considered successful. If this minimum 
cannot be met, then the producer will raise an exception (either 
NotEnoughReplicas or NotEnoughReplicasAfterAppend). 
+  When a producer sets request.required.acks to -1, 
min.insync.replicas specifies the minimum number of replicas that must 
acknowledge a write for the write to be considered successful. If this minimum 
cannot be met, then the producer will raise an exception (either 
NotEnoughReplicas or NotEnoughReplicasAfterAppend).
   When used together, min.insync.replicas and request.required.acks allow 
you to enforce greater durability guarantees. A typical scenario would be to 
create a topic with a replication factor of 3, set min.insync.replicas to 2, 
and produce with request.required.acks of -1. This will ensure that the 
producer raises an exception if a majority of replicas do not receive a 
write.
 
 
@@ -147,7 +147,17 @@ The following are the topic-level configurations. The 
server's default configura
 
 
 
-3.2 Consumer Configs
+3.2 Producer Configs
+
+Below is the configuration of the Java producer:
+
+
+
+For those interested in the legacy Scala producer configs, information can 
be found here: http://kafka.apache.org/082/documentation.html#producerconfigs
+
+
+3.3 Consumer Configs
 The essential consumer configurations are the following:
 
 group.id
@@ -317,142 +327,6 @@ The essential consumer configurations are the following:
 
 More details about consumer configuration can be found in the scala class 
kafka.consumer.ConsumerConfig.
 
-3.3 New Consumer Configs
-Since 0.9.0.0 we have been working on a replacement for our existing 
simple and high-level consumers. The code can be considered beta quality. Below 
is the configuration for the new consumer: 
-
-
-3.4 Kafka Producer Configs
-
-We recommend using KafkaProducer as SyncProducer and AsyncProducer will be 
deprecated in the next release. Below is the configuration for the new 
producer:
-
-
-3.5 Old Producer Configs
-We recommend not to use the Scala SyncProducer and AsyncProducer as they will 
be deprecated in the next release. Below you can see their configuration:
-
-metadata.broker.list
-request.required.acks
-producer.type
-serializer.class
-
-
-
-
-Property
-Default
-Description
-  
-
-  metadata.broker.list
-  
-  
-This is for bootstrapping and the producer will only use it for 
getting metadata (topics, partitions and replicas). The socket connections for 
sending the actual data will be established based on the broker information 
returned in the metadata. The format is host1:port1,host2:port2, and the list 
can be a subset of brokers or a VIP pointing to a subset of 

kafka git commit: KAFKA-2778: Use zero loss settings by default for Connect source producers.

2015-11-09 Thread gwenshap
Repository: kafka
Updated Branches:
  refs/heads/trunk bc76e6704 -> 13ba57dcf


KAFKA-2778: Use zero loss settings by default for Connect source producers.

Author: Ewen Cheslack-Postava 

Reviewers: Gwen Shapira

Closes #459 from ewencp/kafka-2778-connect-source-zero-loss-settings


Project: http://git-wip-us.apache.org/repos/asf/kafka/repo
Commit: http://git-wip-us.apache.org/repos/asf/kafka/commit/13ba57dc
Tree: http://git-wip-us.apache.org/repos/asf/kafka/tree/13ba57dc
Diff: http://git-wip-us.apache.org/repos/asf/kafka/diff/13ba57dc

Branch: refs/heads/trunk
Commit: 13ba57dcfc45e60de281cc55125e7446322308ba
Parents: bc76e67
Author: Ewen Cheslack-Postava 
Authored: Mon Nov 9 10:36:57 2015 -0800
Committer: Gwen Shapira 
Committed: Mon Nov 9 10:36:57 2015 -0800

--
 .../java/org/apache/kafka/connect/runtime/Worker.java   |  9 +
 .../apache/kafka/connect/runtime/WorkerSourceTask.java  | 12 ++--
 2 files changed, 19 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/kafka/blob/13ba57dc/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/Worker.java
--
diff --git 
a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/Worker.java 
b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/Worker.java
index 2e359d6..359a79c 100644
--- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/Worker.java
+++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/Worker.java
@@ -92,6 +92,15 @@ public class Worker {
 producerProps.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, 
Utils.join(config.getList(WorkerConfig.BOOTSTRAP_SERVERS_CONFIG), ","));
 producerProps.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, 
"org.apache.kafka.common.serialization.ByteArraySerializer");
 producerProps.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, 
"org.apache.kafka.common.serialization.ByteArraySerializer");
+
+// These settings are designed to ensure there is no data loss. They 
*may* be overridden via configs passed to the
+// worker, but this may compromise the delivery guarantees of Kafka 
Connect.
+producerProps.put(ProducerConfig.REQUEST_TIMEOUT_MS_CONFIG, ((Integer) 
Integer.MAX_VALUE).toString());
+producerProps.put(ProducerConfig.RETRIES_CONFIG, ((Integer) 
Integer.MAX_VALUE).toString());
+producerProps.put(ProducerConfig.MAX_BLOCK_MS_CONFIG, ((Long) 
Long.MAX_VALUE).toString());
+producerProps.put(ProducerConfig.ACKS_CONFIG, "all");
+
producerProps.put(ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION, "1");
+
 producerProps.putAll(config.unusedConfigs());
 
 producer = new KafkaProducer<>(producerProps);
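
A standalone sketch of the zero-loss defaults the worker now applies
(values mirror the change above; the broker address is an illustrative
placeholder, and any of these can still be overridden through the worker
config at the cost of weaker delivery guarantees):

    import java.util.Properties;
    import org.apache.kafka.clients.producer.KafkaProducer;
    import org.apache.kafka.clients.producer.ProducerConfig;

    public class ZeroLossProducerSketch {
        public static void main(String[] args) {
            Properties producerProps = new Properties();
            producerProps.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // illustrative
            producerProps.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG,
                    "org.apache.kafka.common.serialization.ByteArraySerializer");
            producerProps.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG,
                    "org.apache.kafka.common.serialization.ByteArraySerializer");
            // Zero-loss settings, as in the Worker change above:
            producerProps.put(ProducerConfig.ACKS_CONFIG, "all");  // wait for all in-sync replicas
            producerProps.put(ProducerConfig.RETRIES_CONFIG, Integer.toString(Integer.MAX_VALUE));
            producerProps.put(ProducerConfig.REQUEST_TIMEOUT_MS_CONFIG, Integer.toString(Integer.MAX_VALUE));
            producerProps.put(ProducerConfig.MAX_BLOCK_MS_CONFIG, Long.toString(Long.MAX_VALUE));
            producerProps.put(ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION, "1"); // keep ordering on retry
            KafkaProducer<byte[], byte[]> producer = new KafkaProducer<>(producerProps);
            producer.close();
        }
    }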

http://git-wip-us.apache.org/repos/asf/kafka/blob/13ba57dc/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/WorkerSourceTask.java
--
diff --git 
a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/WorkerSourceTask.java
 
b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/WorkerSourceTask.java
index 141e430..6cf1dd7 100644
--- 
a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/WorkerSourceTask.java
+++ 
b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/WorkerSourceTask.java
@@ -122,7 +122,7 @@ class WorkerSourceTask implements WorkerTask {
  * @param records
  */
 private synchronized void sendRecords(List<SourceRecord> records) {
-for (SourceRecord record : records) {
+for (final SourceRecord record : records) {
 byte[] key = keyConverter.fromConnectData(record.topic(), 
record.keySchema(), record.key());
 byte[] value = valueConverter.fromConnectData(record.topic(), 
record.valueSchema(), record.value());
 final ProducerRecord<byte[], byte[]> producerRecord = new 
ProducerRecord<>(record.topic(), record.kafkaPartition(), key, value);
@@ -138,7 +138,15 @@ class WorkerSourceTask implements WorkerTask {
 @Override
 public void onCompletion(RecordMetadata 
recordMetadata, Exception e) {
 if (e != null) {
-log.error("Failed to send record: ", e);
+// Given the default settings for zero data 
loss, this should basically never happen --
+// between "infinite" retries, indefinite 
blocking on full buffers, and "infinite" request
+// timeouts, callbacks with exceptions should 
never be invoked in practice. If the
+// user overrode these settings, the best we 
can do is 

[2/2] kafka git commit: KAFKA-2783; Drop outdated hadoop contrib modules

2015-11-09 Thread gwenshap
KAFKA-2783; Drop outdated hadoop contrib modules

Author: Grant Henke 

Reviewers: Gwen Shapira

Closes #466 from granthenke/drop-contrib

(cherry picked from commit 69af573b35f04657e31f60e636aba19ffa0b2c84)
Signed-off-by: Gwen Shapira 


Project: http://git-wip-us.apache.org/repos/asf/kafka/repo
Commit: http://git-wip-us.apache.org/repos/asf/kafka/commit/e176fcc7
Tree: http://git-wip-us.apache.org/repos/asf/kafka/tree/e176fcc7
Diff: http://git-wip-us.apache.org/repos/asf/kafka/diff/e176fcc7

Branch: refs/heads/0.9.0
Commit: e176fcc7fb146bf9be2d8d8f2a4f5e02f4753731
Parents: f22ea29
Author: Grant Henke 
Authored: Mon Nov 9 11:02:46 2015 -0800
Committer: Gwen Shapira 
Committed: Mon Nov 9 11:03:07 2015 -0800

--
 README.md   |   2 +-
 build.gradle|  51 +--
 contrib/LICENSE |   1 -
 contrib/NOTICE  |   1 -
 contrib/hadoop-consumer/README  |  66 ---
 contrib/hadoop-consumer/copy-jars.sh|  69 ---
 contrib/hadoop-consumer/hadoop-setup.sh |  20 -
 contrib/hadoop-consumer/run-class.sh|  65 ---
 .../main/java/kafka/etl/KafkaETLContext.java| 270 ---
 .../java/kafka/etl/KafkaETLInputFormat.java |  78 
 .../src/main/java/kafka/etl/KafkaETLJob.java| 172 ---
 .../src/main/java/kafka/etl/KafkaETLKey.java| 104 -
 .../java/kafka/etl/KafkaETLRecordReader.java| 180 
 .../main/java/kafka/etl/KafkaETLRequest.java| 129 --
 .../src/main/java/kafka/etl/KafkaETLUtils.java  | 205 -
 .../src/main/java/kafka/etl/Props.java  | 458 ---
 .../kafka/etl/UndefinedPropertyException.java   |  28 --
 .../main/java/kafka/etl/impl/DataGenerator.java | 134 --
 .../java/kafka/etl/impl/SimpleKafkaETLJob.java  | 104 -
 .../kafka/etl/impl/SimpleKafkaETLMapper.java|  91 
 contrib/hadoop-consumer/test/test.properties|  42 --
 contrib/hadoop-producer/README.md   |  94 
 .../kafka/bridge/examples/TextPublisher.java|  66 ---
 .../kafka/bridge/hadoop/KafkaOutputFormat.java  | 144 --
 .../kafka/bridge/hadoop/KafkaRecordWriter.java  |  88 
 .../java/kafka/bridge/pig/AvroKafkaStorage.java | 115 -
 docs/api.html   |  20 +-
 docs/documentation.html |   2 +-
 docs/implementation.html|  88 ++--
 settings.gradle |   4 +-
 30 files changed, 54 insertions(+), 2837 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/kafka/blob/e176fcc7/README.md
--
diff --git a/README.md b/README.md
index dc15923..6baa17e 100644
--- a/README.md
+++ b/README.md
@@ -60,7 +60,7 @@ The release file can be found inside 
./core/build/distributions/.
 ./gradlew -PscalaVersion=2.11.7 releaseTarGz
 
 ### Running a task for a specific project ###
-This is for 'core', 'contrib:hadoop-consumer', 'contrib:hadoop-producer', 
'examples' and 'clients'
+This is for 'core', 'examples' and 'clients'
 ./gradlew core:jar
 ./gradlew core:test
 

http://git-wip-us.apache.org/repos/asf/kafka/blob/e176fcc7/build.gradle
--
diff --git a/build.gradle b/build.gradle
index 4ea0ee3..f9fd42a 100644
--- a/build.gradle
+++ b/build.gradle
@@ -229,7 +229,7 @@ for ( sv in ['2_10_5', '2_11_7'] ) {
 }
 
 def connectPkgs = ['connect:api', 'connect:runtime', 'connect:json', 
'connect:file']
-def pkgs = ['clients', 'examples', 'contrib:hadoop-consumer', 
'contrib:hadoop-producer', 'log4j-appender', 'tools', 'streams'] + connectPkgs
+def pkgs = ['clients', 'examples', 'log4j-appender', 'tools', 'streams'] + 
connectPkgs
 
 tasks.create(name: "jarConnect", dependsOn: connectPkgs.collect { it + ":jar" 
}) {}
 tasks.create(name: "jarAll", dependsOn: ['jar_core_2_10_5', 'jar_core_2_11_7'] 
+ pkgs.collect { it + ":jar" }) { }
@@ -376,55 +376,6 @@ project(':core') {
   }
 }
 
-project(':contrib:hadoop-consumer') {
-  archivesBaseName = "kafka-hadoop-consumer"
-
-  dependencies {
-compile project(':core')
-compile "org.apache.avro:avro:1.4.0"
-compile "org.apache.pig:pig:0.8.0"
-compile "commons-logging:commons-logging:1.0.4"
-compile "org.codehaus.jackson:jackson-core-asl:1.5.5"
-compile "org.codehaus.jackson:jackson-mapper-asl:1.5.5"
-compile "org.apache.hadoop:hadoop-core:0.20.2"
-  }
-
-  configurations {
-// manually excludes some unnecessary dependencies
-compile.exclude module: 'javax'
-compile.exclude module: 'jms'
-compile.exclude module: 'jmxri'
-compile.exclude module: 'jmxtools'
-   

kafka git commit: KAFKA-2776: Fix lookup of schema conversion cache size in JsonConverter.

2015-11-09 Thread gwenshap
Repository: kafka
Updated Branches:
  refs/heads/trunk a24f9a23a -> e9fc7b8c8


KAFKA-2776: Fix lookup of schema conversion cache size in JsonConverter.

Author: Ewen Cheslack-Postava 

Reviewers: Gwen Shapira

Closes #458 from ewencp/kafka-2776-json-converter-cache-config-fix


Project: http://git-wip-us.apache.org/repos/asf/kafka/repo
Commit: http://git-wip-us.apache.org/repos/asf/kafka/commit/e9fc7b8c
Tree: http://git-wip-us.apache.org/repos/asf/kafka/tree/e9fc7b8c
Diff: http://git-wip-us.apache.org/repos/asf/kafka/diff/e9fc7b8c

Branch: refs/heads/trunk
Commit: e9fc7b8c84908ae642339a2522a79f8bb5155728
Parents: a24f9a2
Author: Ewen Cheslack-Postava 
Authored: Mon Nov 9 10:19:27 2015 -0800
Committer: Gwen Shapira 
Committed: Mon Nov 9 10:19:27 2015 -0800

--
 .../main/java/org/apache/kafka/connect/json/JsonConverter.java   | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/kafka/blob/e9fc7b8c/connect/json/src/main/java/org/apache/kafka/connect/json/JsonConverter.java
--
diff --git 
a/connect/json/src/main/java/org/apache/kafka/connect/json/JsonConverter.java 
b/connect/json/src/main/java/org/apache/kafka/connect/json/JsonConverter.java
index 5e47ad2..815d32b 100644
--- 
a/connect/json/src/main/java/org/apache/kafka/connect/json/JsonConverter.java
+++ 
b/connect/json/src/main/java/org/apache/kafka/connect/json/JsonConverter.java
@@ -44,7 +44,7 @@ import java.util.Map;
 public class JsonConverter implements Converter {
 private static final String SCHEMAS_ENABLE_CONFIG = "schemas.enable";
 private static final boolean SCHEMAS_ENABLE_DEFAULT = true;
-private static final String SCHEMAS_CACHE_CONFIG = "schemas.cache.size";
+private static final String SCHEMAS_CACHE_SIZE_CONFIG = 
"schemas.cache.size";
 private static final int SCHEMAS_CACHE_SIZE_DEFAULT = 1000;
 
 private static final HashMap<Schema.Type, JsonToConnectTypeConverter> 
TO_CONNECT_CONVERTERS = new HashMap<>();
@@ -293,7 +293,7 @@ public class JsonConverter implements Converter {
 serializer.configure(configs, isKey);
 deserializer.configure(configs, isKey);
 
-Object cacheSizeVal = configs.get(SCHEMAS_CACHE_SIZE_DEFAULT);
+Object cacheSizeVal = configs.get(SCHEMAS_CACHE_SIZE_CONFIG);
 if (cacheSizeVal != null)
 cacheSize = (int) cacheSizeVal;
 fromConnectSchemaCache = new SynchronizedCache<>(new LRUCache(cacheSize));
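
A sketch of the practical effect of the fix (assumed usage; the
"schemas.cache.size" key and the configure(Map, boolean) signature come
from the converter code above): a user-supplied cache size is now actually
honored, where before the lookup accidentally used the default constant
1000 as the map key and always fell back to the default size.

    import java.util.HashMap;
    import java.util.Map;
    import org.apache.kafka.connect.json.JsonConverter;

    public class JsonConverterCacheSketch {
        public static void main(String[] args) {
            Map<String, Object> configs = new HashMap<>();
            configs.put("schemas.cache.size", 16); // previously ignored by the broken lookup
            JsonConverter converter = new JsonConverter();
            converter.configure(configs, false);   // false = configure for record values, not keys
        }
    }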



[1/2] kafka git commit: KAFKA-2783; Drop outdated hadoop contrib modules

2015-11-09 Thread gwenshap
Repository: kafka
Updated Branches:
  refs/heads/0.9.0 f22ea2970 -> e176fcc7f


http://git-wip-us.apache.org/repos/asf/kafka/blob/e176fcc7/contrib/hadoop-producer/README.md
--
diff --git a/contrib/hadoop-producer/README.md 
b/contrib/hadoop-producer/README.md
deleted file mode 100644
index a5bef73..0000000
--- a/contrib/hadoop-producer/README.md
+++ /dev/null
@@ -1,94 +0,0 @@
-Hadoop to Kafka Bridge
-==
-
-What's new?

-
-* Kafka 0.8 support
-  * No more ZK-based load balancing (backwards incompatible change)
-* Semantic partitioning is now supported in KafkaOutputFormat. Just specify a
-  key in the output committer of your job. The Pig StoreFunc doesn't support
-  semantic partitioning.
-* Config parameters are now the same as the Kafka producer, just prepended with
-  kafka.output (e.g., kafka.output.max.message.size). This is a backwards
-  incompatible change.
-
-What is it?

-
-The Hadoop to Kafka bridge is a way to publish data from Hadoop to Kafka. There
-are two possible mechanisms, varying from easy to difficult: writing a Pig
-script and writing messages in Avro format, or rolling your own job using the
-Kafka `OutputFormat`. 
-
-Note that there are no write-once semantics: any client of the data must handle
-messages in an idempotent manner. That is, because of node failures and
-Hadoop's failure recovery, it's possible that the same message is published
-multiple times in the same push.
-
-How do I use it?
-
-
-With this bridge, Kafka topics are URIs and are specified as URIs of the form
-`kafka://<kafka-broker>/<kafka-topic>` to connect to a specific Kafka broker.
-
-### Pig ###
-
-Pig bridge writes data in binary Avro format with one message created per input
-row. To push data via Kafka, store to the Kafka URI using `AvroKafkaStorage`
-with the Avro schema as its first argument. You'll need to register the
-appropriate Kafka JARs. Here is what an example Pig script looks like:
-
-REGISTER hadoop-producer_2.8.0-0.8.0.jar;
-REGISTER avro-1.4.0.jar;
-REGISTER piggybank.jar;
-REGISTER kafka-0.8.0.jar;
-REGISTER jackson-core-asl-1.5.5.jar;
-REGISTER jackson-mapper-asl-1.5.5.jar;
-REGISTER scala-library.jar;
-
-member_info = LOAD 'member_info.tsv' AS (member_id : int, name : 
chararray);
-names = FOREACH member_info GENERATE name;
-STORE member_info INTO 'kafka://my-kafka:9092/member_info' USING 
kafka.bridge.AvroKafkaStorage('"string"');
-
-That's it! The Pig StoreFunc makes use of AvroStorage in Piggybank to convert
-from Pig's data model to the specified Avro schema.
-
-Further, multi-store is possible with KafkaStorage, so you can easily write to
-multiple topics and brokers in the same job:
-
-SPLIT member_info INTO early_adopters IF member_id < 1000, others IF 
member_id >= 1000;
-STORE early_adopters INTO 'kafka://my-broker:9092/early_adopters' USING 
AvroKafkaStorage('$schema');
-STORE others INTO 'kafka://my-broker2:9092/others' USING 
AvroKafkaStorage('$schema');
-
-### KafkaOutputFormat ###
-
-KafkaOutputFormat is a Hadoop OutputFormat for publishing data via Kafka. It
-uses the newer 0.20 mapreduce APIs and simply pushes bytes (i.e.,
-BytesWritable). This is a lower-level method of publishing data, as it allows
-you to precisely control output.
-
-Included is an example that publishes some input text line-by-line to a topic.
-With KafkaOutputFormat, the key can be a null, where it is ignored by the
-producer (random partitioning), or any object for semantic partitioning of the
-stream (with an appropriate Kafka partitioner set). Speculative execution is
-turned off by the OutputFormat.
-
-What can I tune?
-
-
-* kafka.output.queue.bytes: Bytes to queue in memory before pushing to the 
Kafka
-  producer (i.e., the batch size). Default is 1,000,000 (1 million) bytes.
-
-Any of Kafka's producer parameters can be changed by prefixing them with
-"kafka.output" in one's job configuration. For example, to change the
-compression codec, one would add the "kafka.output.compression.codec" parameter
-(e.g., "SET kafka.output.compression.codec 0" in one's Pig script for no
-compression). 
-
-For easier debugging, the above values as well as the Kafka broker information
-(kafka.metadata.broker.list), the topic (kafka.output.topic), and the schema
-(kafka.output.schema) are injected into the job's configuration. By default,
-the Hadoop producer uses Kafka's sync producer as asynchronous operation
-doesn't make sense in the batch Hadoop case.
-

http://git-wip-us.apache.org/repos/asf/kafka/blob/e176fcc7/contrib/hadoop-producer/src/main/java/kafka/bridge/examples/TextPublisher.java
--
diff --git 
a/contrib/hadoop-producer/src/main/java/kafka/bridge/examples/TextPublisher.java
 
b/contrib/hadoop-producer/src/main/java/kafka/bridge/examples/TextPublisher.java
deleted 

kafka git commit: KAFKA-2775: Move exceptions into API package for Kafka Connect.

2015-11-09 Thread gwenshap
Repository: kafka
Updated Branches:
  refs/heads/trunk 2b04004de -> bc76e6704


KAFKA-2775: Move exceptions into API package for Kafka Connect.

Author: Ewen Cheslack-Postava 

Reviewers: Gwen Shapira

Closes #457 from ewencp/kafka-2775-exceptions-in-api-package


Project: http://git-wip-us.apache.org/repos/asf/kafka/repo
Commit: http://git-wip-us.apache.org/repos/asf/kafka/commit/bc76e670
Tree: http://git-wip-us.apache.org/repos/asf/kafka/tree/bc76e670
Diff: http://git-wip-us.apache.org/repos/asf/kafka/diff/bc76e670

Branch: refs/heads/trunk
Commit: bc76e6704e8f14d59bb5d4fcf9bdf544c9e463bf
Parents: 2b04004
Author: Ewen Cheslack-Postava 
Authored: Mon Nov 9 10:27:18 2015 -0800
Committer: Gwen Shapira 
Committed: Mon Nov 9 10:27:18 2015 -0800

--
 .../connect/errors/AlreadyExistsException.java  | 35 
 .../kafka/connect/errors/NotFoundException.java | 35 
 .../connect/errors/RetriableException.java  | 35 
 .../connect/errors/AlreadyExistsException.java  | 35 
 .../kafka/connect/errors/NotFoundException.java | 35 
 .../connect/errors/RetriableException.java  | 35 
 6 files changed, 105 insertions(+), 105 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/kafka/blob/bc76e670/connect/api/src/main/java/org/apache/kafka/connect/errors/AlreadyExistsException.java
--
diff --git 
a/connect/api/src/main/java/org/apache/kafka/connect/errors/AlreadyExistsException.java
 
b/connect/api/src/main/java/org/apache/kafka/connect/errors/AlreadyExistsException.java
new file mode 100644
index 0000000..6fdefdf
--- /dev/null
+++ 
b/connect/api/src/main/java/org/apache/kafka/connect/errors/AlreadyExistsException.java
@@ -0,0 +1,35 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ **/
+
+package org.apache.kafka.connect.errors;
+
+/**
+ * Indicates the operation tried to create an entity that already exists.
+ */
+public class AlreadyExistsException extends ConnectException {
+public AlreadyExistsException(String s) {
+super(s);
+}
+
+public AlreadyExistsException(String s, Throwable throwable) {
+super(s, throwable);
+}
+
+public AlreadyExistsException(Throwable throwable) {
+super(throwable);
+}
+}
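
A hypothetical caller, sketched to show the intended use of the relocated
exception (the registry class and method here are illustrative, not from
the patch):

    import java.util.HashSet;
    import java.util.Set;
    import org.apache.kafka.connect.errors.AlreadyExistsException;

    public class ConnectorRegistrySketch {
        private final Set<String> connectors = new HashSet<>();

        public void register(String name) {
            // Set.add returns false when the entry is already present.
            if (!connectors.add(name))
                throw new AlreadyExistsException("Connector " + name + " already exists");
        }
    }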

http://git-wip-us.apache.org/repos/asf/kafka/blob/bc76e670/connect/api/src/main/java/org/apache/kafka/connect/errors/NotFoundException.java
--
diff --git 
a/connect/api/src/main/java/org/apache/kafka/connect/errors/NotFoundException.java
 
b/connect/api/src/main/java/org/apache/kafka/connect/errors/NotFoundException.java
new file mode 100644
index 0000000..a3bbe91
--- /dev/null
+++ 
b/connect/api/src/main/java/org/apache/kafka/connect/errors/NotFoundException.java
@@ -0,0 +1,35 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ **/
+
+package org.apache.kafka.connect.errors;
+
+/**
+ * Indicates that an operation attempted to modify or delete a connector or 
task that is not present on the worker.
+ */
+public class NotFoundException extends ConnectException {
+public NotFoundException(String s) {
+super(s);
+}
+
+   

kafka git commit: KAFKA-2782: Fix KafkaBasedLogTest assertion and move it to the main test thread.

2015-11-09 Thread gwenshap
Repository: kafka
Updated Branches:
  refs/heads/0.9.0 4069011ee -> 0d2fdfa81


KAFKA-2782: Fix KafkaBasedLogTest assertion and move it to the main test thread.

Author: Ewen Cheslack-Postava 

Reviewers: Gwen Shapira

Closes #463 from ewencp/kafka-2782-fix-kafka-based-log-test-assertion

(cherry picked from commit 75f2b8c8b42459a3ab2962f75f0e05fbbb9e8333)
Signed-off-by: Gwen Shapira 


Project: http://git-wip-us.apache.org/repos/asf/kafka/repo
Commit: http://git-wip-us.apache.org/repos/asf/kafka/commit/0d2fdfa8
Tree: http://git-wip-us.apache.org/repos/asf/kafka/tree/0d2fdfa8
Diff: http://git-wip-us.apache.org/repos/asf/kafka/diff/0d2fdfa8

Branch: refs/heads/0.9.0
Commit: 0d2fdfa81566c6645dbc818fbd3722ddda90b87c
Parents: 4069011
Author: Ewen Cheslack-Postava 
Authored: Mon Nov 9 10:39:34 2015 -0800
Committer: Gwen Shapira 
Committed: Mon Nov 9 10:39:50 2015 -0800

--
 .../apache/kafka/connect/util/KafkaBasedLogTest.java | 15 ---
 1 file changed, 8 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/kafka/blob/0d2fdfa8/connect/runtime/src/test/java/org/apache/kafka/connect/util/KafkaBasedLogTest.java
--
diff --git 
a/connect/runtime/src/test/java/org/apache/kafka/connect/util/KafkaBasedLogTest.java
 
b/connect/runtime/src/test/java/org/apache/kafka/connect/util/KafkaBasedLogTest.java
index 1c3b842..2ead813 100644
--- 
a/connect/runtime/src/test/java/org/apache/kafka/connect/util/KafkaBasedLogTest.java
+++ 
b/connect/runtime/src/test/java/org/apache/kafka/connect/util/KafkaBasedLogTest.java
@@ -258,14 +258,11 @@ public class KafkaBasedLogTest {
 assertEquals(2, invoked.get());
 
 // Now we should have to wait for the records to be read back when we 
call readToEnd()
-final AtomicBoolean getInvokedAndPassed = new AtomicBoolean(false);
+final AtomicBoolean getInvoked = new AtomicBoolean(false);
 final FutureCallback<Void> readEndFutureCallback = new 
FutureCallback<>(new Callback<Void>() {
 @Override
 public void onCompletion(Throwable error, Void result) {
-assertEquals(4, consumedRecords.size());
-assertEquals(TP0_VALUE_NEW, consumedRecords.get(2).value());
-assertEquals(TP1_VALUE_NEW, consumedRecords.get(3).value());
-getInvokedAndPassed.set(true);
+getInvoked.set(true);
 }
 });
 consumer.schedulePollTask(new Runnable() {
@@ -275,7 +272,6 @@ public class KafkaBasedLogTest {
 // that should follow. This readToEnd call will immediately 
wakeup this consumer.poll() call without
 // returning any data.
 store.readToEnd(readEndFutureCallback);
-
 // Needs to seek to end to find end offsets
 consumer.schedulePollTask(new Runnable() {
 @Override
@@ -311,7 +307,12 @@ public class KafkaBasedLogTest {
 }
 });
 readEndFutureCallback.get(10000, TimeUnit.MILLISECONDS);
-assertTrue(getInvokedAndPassed.get());
+assertTrue(getInvoked.get());
+assertEquals(4, consumedRecords.size());
+assertEquals(TP0_VALUE, consumedRecords.get(0).value());
+assertEquals(TP0_VALUE_NEW, consumedRecords.get(1).value());
+assertEquals(TP1_VALUE, consumedRecords.get(2).value());
+assertEquals(TP1_VALUE_NEW, consumedRecords.get(3).value());
 
 // Cleanup
 store.stop();



kafka git commit: KAFKA-2782: Fix KafkaBasedLogTest assertion and move it to the main test thread.

2015-11-09 Thread gwenshap
Repository: kafka
Updated Branches:
  refs/heads/trunk 13ba57dcf -> 75f2b8c8b


KAFKA-2782: Fix KafkaBasedLogTest assertion and move it to the main test thread.

Author: Ewen Cheslack-Postava 

Reviewers: Gwen Shapira

Closes #463 from ewencp/kafka-2782-fix-kafka-based-log-test-assertion


Project: http://git-wip-us.apache.org/repos/asf/kafka/repo
Commit: http://git-wip-us.apache.org/repos/asf/kafka/commit/75f2b8c8
Tree: http://git-wip-us.apache.org/repos/asf/kafka/tree/75f2b8c8
Diff: http://git-wip-us.apache.org/repos/asf/kafka/diff/75f2b8c8

Branch: refs/heads/trunk
Commit: 75f2b8c8b42459a3ab2962f75f0e05fbbb9e8333
Parents: 13ba57d
Author: Ewen Cheslack-Postava 
Authored: Mon Nov 9 10:39:34 2015 -0800
Committer: Gwen Shapira 
Committed: Mon Nov 9 10:39:34 2015 -0800

--
 .../apache/kafka/connect/util/KafkaBasedLogTest.java | 15 ---
 1 file changed, 8 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/kafka/blob/75f2b8c8/connect/runtime/src/test/java/org/apache/kafka/connect/util/KafkaBasedLogTest.java
--
diff --git 
a/connect/runtime/src/test/java/org/apache/kafka/connect/util/KafkaBasedLogTest.java
 
b/connect/runtime/src/test/java/org/apache/kafka/connect/util/KafkaBasedLogTest.java
index 1c3b842..2ead813 100644
--- 
a/connect/runtime/src/test/java/org/apache/kafka/connect/util/KafkaBasedLogTest.java
+++ 
b/connect/runtime/src/test/java/org/apache/kafka/connect/util/KafkaBasedLogTest.java
@@ -258,14 +258,11 @@ public class KafkaBasedLogTest {
 assertEquals(2, invoked.get());
 
 // Now we should have to wait for the records to be read back when we 
call readToEnd()
-final AtomicBoolean getInvokedAndPassed = new AtomicBoolean(false);
+final AtomicBoolean getInvoked = new AtomicBoolean(false);
 final FutureCallback<Void> readEndFutureCallback = new 
FutureCallback<>(new Callback<Void>() {
 @Override
 public void onCompletion(Throwable error, Void result) {
-assertEquals(4, consumedRecords.size());
-assertEquals(TP0_VALUE_NEW, consumedRecords.get(2).value());
-assertEquals(TP1_VALUE_NEW, consumedRecords.get(3).value());
-getInvokedAndPassed.set(true);
+getInvoked.set(true);
 }
 });
 consumer.schedulePollTask(new Runnable() {
@@ -275,7 +272,6 @@ public class KafkaBasedLogTest {
 // that should follow. This readToEnd call will immediately 
wakeup this consumer.poll() call without
 // returning any data.
 store.readToEnd(readEndFutureCallback);
-
 // Needs to seek to end to find end offsets
 consumer.schedulePollTask(new Runnable() {
 @Override
@@ -311,7 +307,12 @@ public class KafkaBasedLogTest {
 }
 });
 readEndFutureCallback.get(10000, TimeUnit.MILLISECONDS);
-assertTrue(getInvokedAndPassed.get());
+assertTrue(getInvoked.get());
+assertEquals(4, consumedRecords.size());
+assertEquals(TP0_VALUE, consumedRecords.get(0).value());
+assertEquals(TP0_VALUE_NEW, consumedRecords.get(1).value());
+assertEquals(TP1_VALUE, consumedRecords.get(2).value());
+assertEquals(TP1_VALUE_NEW, consumedRecords.get(3).value());
 
 // Cleanup
 store.stop();
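
The general pattern behind this fix, as a standalone sketch (a hypothetical
test class, not part of the patch): an assertion that fails on a background
thread only kills that thread, so the callback records its outcome and the
main test thread performs the assertions, where JUnit can report failures.

    import static org.junit.Assert.assertTrue;
    import java.util.concurrent.atomic.AtomicBoolean;
    import org.junit.Test;

    public class CallbackAssertionPatternTest {
        @Test
        public void assertOnTheTestThread() throws Exception {
            final AtomicBoolean callbackRan = new AtomicBoolean(false);
            Thread callbackThread = new Thread(new Runnable() {
                @Override
                public void run() {
                    // Record only; a failed assert here would not fail the test.
                    callbackRan.set(true);
                }
            });
            callbackThread.start();
            callbackThread.join(10000);
            // Verify on the main thread.
            assertTrue(callbackRan.get());
        }
    }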



[10/26] kafka git commit: KAFKA-2774: Rename Copycat to Kafka Connect

2015-11-08 Thread gwenshap
http://git-wip-us.apache.org/repos/asf/kafka/blob/f2031d40/copycat/json/src/main/java/org/apache/kafka/copycat/json/JsonConverter.java
--
diff --git 
a/copycat/json/src/main/java/org/apache/kafka/copycat/json/JsonConverter.java 
b/copycat/json/src/main/java/org/apache/kafka/copycat/json/JsonConverter.java
deleted file mode 100644
index ca8f029..0000000
--- 
a/copycat/json/src/main/java/org/apache/kafka/copycat/json/JsonConverter.java
+++ /dev/null
@@ -1,735 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- * 
- * http://www.apache.org/licenses/LICENSE-2.0
- * 
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- **/
-
-package org.apache.kafka.copycat.json;
-
-import com.fasterxml.jackson.databind.JsonNode;
-import com.fasterxml.jackson.databind.node.ArrayNode;
-import com.fasterxml.jackson.databind.node.JsonNodeFactory;
-import com.fasterxml.jackson.databind.node.ObjectNode;
-import org.apache.kafka.common.cache.Cache;
-import org.apache.kafka.common.cache.LRUCache;
-import org.apache.kafka.common.cache.SynchronizedCache;
-import org.apache.kafka.common.errors.SerializationException;
-import org.apache.kafka.copycat.data.*;
-import org.apache.kafka.copycat.errors.DataException;
-import org.apache.kafka.copycat.storage.Converter;
-
-import java.io.IOException;
-import java.math.BigDecimal;
-import java.nio.ByteBuffer;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.HashMap;
-import java.util.Iterator;
-import java.util.Map;
-
-/**
- * Implementation of Converter that uses JSON to store schemas and objects.
- */
-public class JsonConverter implements Converter {
-private static final String SCHEMAS_ENABLE_CONFIG = "schemas.enable";
-private static final boolean SCHEMAS_ENABLE_DEFAULT = true;
-private static final String SCHEMAS_CACHE_CONFIG = "schemas.cache.size";
-private static final int SCHEMAS_CACHE_SIZE_DEFAULT = 1000;
-
-private static final HashMap<Schema.Type, JsonToCopycatTypeConverter> 
TO_COPYCAT_CONVERTERS = new HashMap<>();
-
-private static Object checkOptionalAndDefault(Schema schema) {
-if (schema.defaultValue() != null)
-return schema.defaultValue();
-if (schema.isOptional())
-return null;
-throw new DataException("Invalid null value for required field");
-}
-
-static {
-TO_COPYCAT_CONVERTERS.put(Schema.Type.BOOLEAN, new 
JsonToCopycatTypeConverter() {
-@Override
-public Object convert(Schema schema, JsonNode value) {
-if (value.isNull()) return checkOptionalAndDefault(schema);
-return value.booleanValue();
-}
-});
-TO_COPYCAT_CONVERTERS.put(Schema.Type.INT8, new 
JsonToCopycatTypeConverter() {
-@Override
-public Object convert(Schema schema, JsonNode value) {
-if (value.isNull()) return checkOptionalAndDefault(schema);
-return (byte) value.intValue();
-}
-});
-TO_COPYCAT_CONVERTERS.put(Schema.Type.INT16, new 
JsonToCopycatTypeConverter() {
-@Override
-public Object convert(Schema schema, JsonNode value) {
-if (value.isNull()) return checkOptionalAndDefault(schema);
-return (short) value.intValue();
-}
-});
-TO_COPYCAT_CONVERTERS.put(Schema.Type.INT32, new 
JsonToCopycatTypeConverter() {
-@Override
-public Object convert(Schema schema, JsonNode value) {
-if (value.isNull()) return checkOptionalAndDefault(schema);
-return value.intValue();
-}
-});
-TO_COPYCAT_CONVERTERS.put(Schema.Type.INT64, new 
JsonToCopycatTypeConverter() {
-@Override
-public Object convert(Schema schema, JsonNode value) {
-if (value.isNull()) return checkOptionalAndDefault(schema);
-return value.longValue();
-}
-});
-TO_COPYCAT_CONVERTERS.put(Schema.Type.FLOAT32, new 
JsonToCopycatTypeConverter() {
-@Override
-public Object convert(Schema schema, JsonNode value) {
-if (value.isNull()) return checkOptionalAndDefault(schema);
-   

kafka git commit: KAFKA-2826: Make Kafka Connect ducktape services easier to extend.

2015-11-12 Thread gwenshap
Repository: kafka
Updated Branches:
  refs/heads/trunk 2802bd081 -> 969d0cb0a


KAFKA-2826: Make Kafka Connect ducktape services easier to extend.

Author: Ewen Cheslack-Postava 

Reviewers: Gwen Shapira

Closes #522 from ewencp/kafka-2826-extensible-connect-services


Project: http://git-wip-us.apache.org/repos/asf/kafka/repo
Commit: http://git-wip-us.apache.org/repos/asf/kafka/commit/969d0cb0
Tree: http://git-wip-us.apache.org/repos/asf/kafka/tree/969d0cb0
Diff: http://git-wip-us.apache.org/repos/asf/kafka/diff/969d0cb0

Branch: refs/heads/trunk
Commit: 969d0cb0ae316ba0dfdb34ed096bfd56fe86ad92
Parents: 2802bd0
Author: Ewen Cheslack-Postava 
Authored: Thu Nov 12 18:54:20 2015 -0800
Committer: Gwen Shapira 
Committed: Thu Nov 12 18:54:20 2015 -0800

--
 tests/kafkatest/services/connect.py | 23 +++
 1 file changed, 15 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/kafka/blob/969d0cb0/tests/kafkatest/services/connect.py
--
diff --git a/tests/kafkatest/services/connect.py 
b/tests/kafkatest/services/connect.py
index 26feb99..a6e902f 100644
--- a/tests/kafkatest/services/connect.py
+++ b/tests/kafkatest/services/connect.py
@@ -151,6 +151,13 @@ class ConnectStandaloneService(ConnectServiceBase):
 def node(self):
 return self.nodes[0]
 
+def start_cmd(self, node, connector_configs):
+cmd = "( export KAFKA_LOG4J_OPTS=\"-Dlog4j.configuration=file:%s\"; " 
% self.LOG4J_CONFIG_FILE
+cmd += "/opt/%s/bin/connect-standalone.sh %s " % (kafka_dir(node), 
self.CONFIG_FILE)
+cmd += " ".join(connector_configs)
+cmd += " & echo $! >&3 ) 1>> %s 2>> %s 3> %s" % (self.STDOUT_FILE, 
self.STDERR_FILE, self.PID_FILE)
+return cmd
+
 def start_node(self, node):
 node.account.ssh("mkdir -p %s" % self.PERSISTENT_ROOT, 
allow_fail=False)
 
@@ -164,10 +171,7 @@ class ConnectStandaloneService(ConnectServiceBase):
 
 self.logger.info("Starting Kafka Connect standalone process on " + 
str(node.account))
 with node.account.monitor_log(self.LOG_FILE) as monitor:
-node.account.ssh("( export 
KAFKA_LOG4J_OPTS=\"-Dlog4j.configuration=file:%s\"; " % self.LOG4J_CONFIG_FILE +
- "/opt/%s/bin/connect-standalone.sh %s " % 
(kafka_dir(node), self.CONFIG_FILE) +
- " ".join(remote_connector_configs) +
- (" & echo $! >&3 ) 1>> %s 2>> %s 3> %s" % 
(self.STDOUT_FILE, self.STDERR_FILE, self.PID_FILE)))
+node.account.ssh(self.start_cmd(node, remote_connector_configs))
 monitor.wait_until('Kafka Connect started', timeout_sec=15, 
err_msg="Never saw message indicating Kafka Connect finished startup on " + 
str(node.account))
 
 if len(self.pids(node)) == 0:
@@ -182,6 +186,12 @@ class ConnectDistributedService(ConnectServiceBase):
 self.offsets_topic = offsets_topic
 self.configs_topic = configs_topic
 
+def start_cmd(self, node):
+cmd = "( export KAFKA_LOG4J_OPTS=\"-Dlog4j.configuration=file:%s\"; " 
% self.LOG4J_CONFIG_FILE
+cmd += "/opt/%s/bin/connect-distributed.sh %s " % (kafka_dir(node), 
self.CONFIG_FILE)
+cmd += " & echo $! >&3 ) 1>> %s 2>> %s 3> %s" % (self.STDOUT_FILE, 
self.STDERR_FILE, self.PID_FILE)
+return cmd
+
 def start_node(self, node):
 node.account.ssh("mkdir -p %s" % self.PERSISTENT_ROOT, 
allow_fail=False)
 
@@ -192,10 +202,7 @@ class ConnectDistributedService(ConnectServiceBase):
 
 self.logger.info("Starting Kafka Connect distributed process on " + 
str(node.account))
 with node.account.monitor_log(self.LOG_FILE) as monitor:
-cmd = "( export 
KAFKA_LOG4J_OPTS=\"-Dlog4j.configuration=file:%s\"; " % self.LOG4J_CONFIG_FILE
-cmd += "/opt/%s/bin/connect-distributed.sh %s " % 
(kafka_dir(node), self.CONFIG_FILE)
-cmd += " & echo $! >&3 ) 1>> %s 2>> %s 3> %s" % (self.STDOUT_FILE, 
self.STDERR_FILE, self.PID_FILE)
-node.account.ssh(cmd)
+node.account.ssh(self.start_cmd(node))
 monitor.wait_until('Kafka Connect started', timeout_sec=15, 
err_msg="Never saw message indicating Kafka Connect finished startup on " + 
str(node.account))
 
 if len(self.pids(node)) == 0:



kafka git commit: MINOR: Increase timeouts for Kafka Connect system test service to make tests more reliable on wimpy nodes.

2015-11-12 Thread gwenshap
Repository: kafka
Updated Branches:
  refs/heads/0.9.0 c5965820a -> d04a918a2


MINOR: Increase timeouts for Kafka Connect system test service to make tests 
more reliable on wimpy nodes.

Author: Ewen Cheslack-Postava 

Reviewers: Gwen Shapira

Closes #514 from ewencp/connect-systest-generous-timeouts


Project: http://git-wip-us.apache.org/repos/asf/kafka/repo
Commit: http://git-wip-us.apache.org/repos/asf/kafka/commit/d04a918a
Tree: http://git-wip-us.apache.org/repos/asf/kafka/tree/d04a918a
Diff: http://git-wip-us.apache.org/repos/asf/kafka/diff/d04a918a

Branch: refs/heads/0.9.0
Commit: d04a918a239b6d894f71975b6e1e6dcc390424a3
Parents: c596582
Author: Ewen Cheslack-Postava 
Authored: Thu Nov 12 11:35:18 2015 -0800
Committer: Gwen Shapira 
Committed: Thu Nov 12 11:35:18 2015 -0800

--
 tests/kafkatest/services/connect.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/kafka/blob/d04a918a/tests/kafkatest/services/connect.py
--
diff --git a/tests/kafkatest/services/connect.py 
b/tests/kafkatest/services/connect.py
index fbac565..8414092 100644
--- a/tests/kafkatest/services/connect.py
+++ b/tests/kafkatest/services/connect.py
@@ -58,7 +58,7 @@ class ConnectServiceBase(Service):
 for pid in pids:
 node.account.signal(pid, sig, allow_fail=False)
 for pid in pids:
-wait_until(lambda: not node.account.alive(pid), timeout_sec=10, 
err_msg="Kafka Connect standalone process took too long to exit")
+wait_until(lambda: not node.account.alive(pid), timeout_sec=60, 
err_msg="Kafka Connect standalone process took too long to exit")
 
 node.account.ssh("rm -f /mnt/connect.pid", allow_fail=False)
 
@@ -149,7 +149,7 @@ class ConnectStandaloneService(ConnectServiceBase):
 node.account.ssh("/opt/%s/bin/connect-standalone.sh 
/mnt/connect.properties " % kafka_dir(node) +
  " ".join(remote_connector_configs) +
  " 1>> /mnt/connect.log 2>> /mnt/connect.log & 
echo $! > /mnt/connect.pid")
-monitor.wait_until('Kafka Connect started', timeout_sec=10, 
err_msg="Never saw message indicating Kafka Connect finished startup")
+monitor.wait_until('Kafka Connect started', timeout_sec=15, 
err_msg="Never saw message indicating Kafka Connect finished startup")
 
 if len(self.pids(node)) == 0:
 raise RuntimeError("No process ids recorded")
@@ -173,7 +173,7 @@ class ConnectDistributedService(ConnectServiceBase):
 cmd = "/opt/%s/bin/connect-distributed.sh /mnt/connect.properties 
" % kafka_dir(node)
 cmd += " 1>> /mnt/connect.log 2>> /mnt/connect.log & echo $! > 
/mnt/connect.pid"
 node.account.ssh(cmd)
-monitor.wait_until('Kafka Connect started', timeout_sec=10, 
err_msg="Never saw message indicating Kafka Connect finished startup")
+monitor.wait_until('Kafka Connect started', timeout_sec=15, 
err_msg="Never saw message indicating Kafka Connect finished startup")
 
 if len(self.pids(node)) == 0:
 raise RuntimeError("No process ids recorded")



kafka git commit: MINOR: Tuned timeout parameter to reduce chance of transient failure

2015-11-12 Thread gwenshap
Repository: kafka
Updated Branches:
  refs/heads/0.9.0 c4e069ae5 -> e8f92d620


MINOR: Tuned timeout parameter to reduce chance of transient failure

Increased timeout in downstream consumer doing validation step. This addresses 
a transient failure case in mirror maker tests with mirror maker failover.

Author: Geoff Anderson 

Reviewers: Gwen Shapira

Closes #521 from granders/minor-mm-transient-failure

(cherry picked from commit 2802bd081c9cfd91b8d7aa104c145ef0514ada49)
Signed-off-by: Gwen Shapira 


Project: http://git-wip-us.apache.org/repos/asf/kafka/repo
Commit: http://git-wip-us.apache.org/repos/asf/kafka/commit/e8f92d62
Tree: http://git-wip-us.apache.org/repos/asf/kafka/tree/e8f92d62
Diff: http://git-wip-us.apache.org/repos/asf/kafka/diff/e8f92d62

Branch: refs/heads/0.9.0
Commit: e8f92d620360431548cca54d6545d1f44db12d2b
Parents: c4e069a
Author: Geoff Anderson 
Authored: Thu Nov 12 18:51:26 2015 -0800
Committer: Gwen Shapira 
Committed: Thu Nov 12 18:51:39 2015 -0800

--
 tests/kafkatest/tests/mirror_maker_test.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/kafka/blob/e8f92d62/tests/kafkatest/tests/mirror_maker_test.py
--
diff --git a/tests/kafkatest/tests/mirror_maker_test.py 
b/tests/kafkatest/tests/mirror_maker_test.py
index d01f6b5..ad252ee 100644
--- a/tests/kafkatest/tests/mirror_maker_test.py
+++ b/tests/kafkatest/tests/mirror_maker_test.py
@@ -47,7 +47,7 @@ class TestMirrorMakerService(ProduceConsumeValidateTest):
 whitelist=self.topic, 
offset_commit_interval_ms=1000)
 # This will consume from target kafka cluster
 self.consumer = ConsoleConsumer(test_context, num_nodes=1, 
kafka=self.target_kafka, topic=self.topic,
-message_validator=is_int, 
consumer_timeout_ms=15000)
+message_validator=is_int, 
consumer_timeout_ms=60000)
 
 def setUp(self):
 # Source cluster



kafka git commit: Minor: Missing License

2015-11-13 Thread gwenshap
Repository: kafka
Updated Branches:
  refs/heads/trunk da9733091 -> 4511aeebc


Minor: Missing License

Author: Sriharsha Chintalapani 

Reviewers: Gwen Shapira

Closes #524 from harshach/missing-license


Project: http://git-wip-us.apache.org/repos/asf/kafka/repo
Commit: http://git-wip-us.apache.org/repos/asf/kafka/commit/4511aeeb
Tree: http://git-wip-us.apache.org/repos/asf/kafka/tree/4511aeeb
Diff: http://git-wip-us.apache.org/repos/asf/kafka/diff/4511aeeb

Branch: refs/heads/trunk
Commit: 4511aeebc3e37a3d9e2ebeabbd6bbfd16d681719
Parents: da97330
Author: Sriharsha Chintalapani 
Authored: Fri Nov 13 10:25:00 2015 -0800
Committer: Gwen Shapira 
Committed: Fri Nov 13 10:25:00 2015 -0800

--
 .../scala/kafka/common/StreamEndException.scala | 16 
 1 file changed, 16 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/kafka/blob/4511aeeb/core/src/main/scala/kafka/common/StreamEndException.scala
--
diff --git a/core/src/main/scala/kafka/common/StreamEndException.scala 
b/core/src/main/scala/kafka/common/StreamEndException.scala
index 2d814f7..a9410bc 100644
--- a/core/src/main/scala/kafka/common/StreamEndException.scala
+++ b/core/src/main/scala/kafka/common/StreamEndException.scala
@@ -1,3 +1,19 @@
+/**
+  * Licensed to the Apache Software Foundation (ASF) under one or more
+  * contributor license agreements.  See the NOTICE file distributed with
+  * this work for additional information regarding copyright ownership.
+  * The ASF licenses this file to You under the Apache License, Version 2.0
+  * (the "License"); you may not use this file except in compliance with
+  * the License.  You may obtain a copy of the License at
+  *
+  *http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing, software
+  * distributed under the License is distributed on an "AS IS" BASIS,
+  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  * See the License for the specific language governing permissions and
+  * limitations under the License.
+  */
 package kafka.common
 
 /**



kafka git commit: Minor: Missing License

2015-11-13 Thread gwenshap
Repository: kafka
Updated Branches:
  refs/heads/0.9.0 cb0de177c -> 3e133c4ee


Minor: Missing License

Author: Sriharsha Chintalapani 

Reviewers: Gwen Shapira

Closes #524 from harshach/missing-license

(cherry picked from commit 4511aeebc3e37a3d9e2ebeabbd6bbfd16d681719)
Signed-off-by: Gwen Shapira 


Project: http://git-wip-us.apache.org/repos/asf/kafka/repo
Commit: http://git-wip-us.apache.org/repos/asf/kafka/commit/3e133c4e
Tree: http://git-wip-us.apache.org/repos/asf/kafka/tree/3e133c4e
Diff: http://git-wip-us.apache.org/repos/asf/kafka/diff/3e133c4e

Branch: refs/heads/0.9.0
Commit: 3e133c4eed95590fb63e1d57e3a1793dff6ecac2
Parents: cb0de17
Author: Sriharsha Chintalapani 
Authored: Fri Nov 13 10:25:00 2015 -0800
Committer: Gwen Shapira 
Committed: Fri Nov 13 10:27:02 2015 -0800

--
 .../scala/kafka/common/StreamEndException.scala | 16 
 1 file changed, 16 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/kafka/blob/3e133c4e/core/src/main/scala/kafka/common/StreamEndException.scala
--
diff --git a/core/src/main/scala/kafka/common/StreamEndException.scala 
b/core/src/main/scala/kafka/common/StreamEndException.scala
index 2d814f7..a9410bc 100644
--- a/core/src/main/scala/kafka/common/StreamEndException.scala
+++ b/core/src/main/scala/kafka/common/StreamEndException.scala
@@ -1,3 +1,19 @@
+/**
+  * Licensed to the Apache Software Foundation (ASF) under one or more
+  * contributor license agreements.  See the NOTICE file distributed with
+  * this work for additional information regarding copyright ownership.
+  * The ASF licenses this file to You under the Apache License, Version 2.0
+  * (the "License"); you may not use this file except in compliance with
+  * the License.  You may obtain a copy of the License at
+  *
+  *http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing, software
+  * distributed under the License is distributed on an "AS IS" BASIS,
+  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  * See the License for the specific language governing permissions and
+  * limitations under the License.
+  */
 package kafka.common
 
 /**



kafka git commit: KAFKA-2807: Move ThroughputThrottler back to tools jar to fix upgrade tests.

2015-11-11 Thread gwenshap
Repository: kafka
Updated Branches:
  refs/heads/trunk a8ccdc615 -> c6b8de4e6


KAFKA-2807: Move ThroughputThrottler back to tools jar to fix upgrade tests.

Author: Ewen Cheslack-Postava 

Reviewers: Gwen Shapira

Closes #499 from ewencp/kafka-2807-relocate-throughput-throttler


Project: http://git-wip-us.apache.org/repos/asf/kafka/repo
Commit: http://git-wip-us.apache.org/repos/asf/kafka/commit/c6b8de4e
Tree: http://git-wip-us.apache.org/repos/asf/kafka/tree/c6b8de4e
Diff: http://git-wip-us.apache.org/repos/asf/kafka/diff/c6b8de4e

Branch: refs/heads/trunk
Commit: c6b8de4e6806d8f9f4af57e15f2a7f4170265c42
Parents: a8ccdc6
Author: Ewen Cheslack-Postava 
Authored: Wed Nov 11 15:55:12 2015 -0800
Committer: Gwen Shapira 
Committed: Wed Nov 11 15:55:12 2015 -0800

--
 build.gradle|  45 +++---
 .../kafka/common/utils/ThroughputThrottler.java | 141 ---
 .../connect/tools/VerifiableSourceTask.java |   2 +-
 settings.gradle |   2 +-
 .../apache/kafka/tools/ProducerPerformance.java |   1 -
 .../apache/kafka/tools/ThroughputThrottler.java | 141 +++
 .../apache/kafka/tools/VerifiableProducer.java  |   1 -
 7 files changed, 166 insertions(+), 167 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/kafka/blob/c6b8de4e/build.gradle
--
diff --git a/build.gradle b/build.gradle
index 70fdbcd..0ee6c41 100644
--- a/build.gradle
+++ b/build.gradle
@@ -230,7 +230,7 @@ for ( sv in ['2_10_5', '2_11_7'] ) {
   }
 }
 
-def connectPkgs = ['connect:api', 'connect:runtime', 'connect:json', 
'connect:file', 'connect:tools']
+def connectPkgs = ['connect-api', 'connect-runtime', 'connect-json', 
'connect-file', 'connect-tools']
 def pkgs = ['clients', 'examples', 'log4j-appender', 'tools', 'streams'] + 
connectPkgs
 
 tasks.create(name: "jarConnect", dependsOn: connectPkgs.collect { it + ":jar" 
}) {}
@@ -321,7 +321,7 @@ project(':core') {
 standardOutput = new File('docs/kafka_config.html').newOutputStream()
   }
 
-  task siteDocsTar(dependsOn: ['genProducerConfigDocs', 
'genConsumerConfigDocs', 'genKafkaConfigDocs', 
':connect:runtime:genConnectConfigDocs'], type: Tar) {
+  task siteDocsTar(dependsOn: ['genProducerConfigDocs', 
'genConsumerConfigDocs', 'genKafkaConfigDocs', 
':connect-runtime:genConnectConfigDocs'], type: Tar) {
 classifier = 'site-docs'
 compression = Compression.GZIP
 from project.file("../docs")
@@ -342,16 +342,16 @@ project(':core') {
 from(project.siteDocsTar) { into("site-docs/") }
 from(project(':tools').jar) { into("libs/") }
 from(project(':tools').configurations.runtime) { into("libs/") }
-from(project(':connect:api').jar) { into("libs/") }
-from(project(':connect:api').configurations.runtime) { into("libs/") }
-from(project(':connect:runtime').jar) { into("libs/") }
-from(project(':connect:runtime').configurations.runtime) { into("libs/") }
-from(project(':connect:json').jar) { into("libs/") }
-from(project(':connect:json').configurations.runtime) { into("libs/") }
-from(project(':connect:file').jar) { into("libs/") }
-from(project(':connect:file').configurations.runtime) { into("libs/") }
-from(project(':connect:tools').jar) { into("libs/") }
-from(project(':connect:tools').configurations.runtime) { into("libs/") }
+from(project(':connect-api').jar) { into("libs/") }
+from(project(':connect-api').configurations.runtime) { into("libs/") }
+from(project(':connect-runtime').jar) { into("libs/") }
+from(project(':connect-runtime').configurations.runtime) { into("libs/") }
+from(project(':connect-json').jar) { into("libs/") }
+from(project(':connect-json').configurations.runtime) { into("libs/") }
+from(project(':connect-file').jar) { into("libs/") }
+from(project(':connect-file').configurations.runtime) { into("libs/") }
+from(project(':connect-tools').jar) { into("libs/") }
+from(project(':connect-tools').configurations.runtime) { into("libs/") }
   }
 
   jar {
@@ -638,7 +638,7 @@ project(':log4j-appender') {
   test.dependsOn('checkstyleMain', 'checkstyleTest')
 }
 
-project(':connect:api') {
+project(':connect-api') {
   apply plugin: 'checkstyle'
   archivesBaseName = "connect-api"
 
@@ -695,12 +695,12 @@ project(':connect:api') {
   test.dependsOn('checkstyleMain', 'checkstyleTest')
 }
 
-project(':connect:json') {
+project(':connect-json') {
   apply plugin: 'checkstyle'
   archivesBaseName = "connect-json"
 
   dependencies {
-compile project(':connect:api')
+compile project(':connect-api')
 compile "$slf4japi"
 compile "com.fasterxml.jackson.core:jackson-databind:$jackson_version"
 
@@ -756,12 +756,12 @@ 

kafka git commit: KAFKA-2690: Hide passwords while logging the config.

2015-11-12 Thread gwenshap
Repository: kafka
Updated Branches:
  refs/heads/trunk 370ce2b4b -> ab5ac264a


KAFKA-2690: Hide passwords while logging the config.

Added PASSWORD_STRING in ConfigDef that returns "[hidden]" when method toString 
is invoked.

Author: Jakub Nowak 

Reviewers: Ismael Juma, Gwen Shapira, Jun Rao

Closes #371 from Mszak/ssl-password-protection


Project: http://git-wip-us.apache.org/repos/asf/kafka/repo
Commit: http://git-wip-us.apache.org/repos/asf/kafka/commit/ab5ac264
Tree: http://git-wip-us.apache.org/repos/asf/kafka/tree/ab5ac264
Diff: http://git-wip-us.apache.org/repos/asf/kafka/diff/ab5ac264

Branch: refs/heads/trunk
Commit: ab5ac264a71d7f895b21b4acfd93d9581dabd7c1
Parents: 370ce2b
Author: Jakub Nowak 
Authored: Thu Nov 12 10:07:04 2015 -0800
Committer: Gwen Shapira 
Committed: Thu Nov 12 10:07:04 2015 -0800

--
 .../kafka/common/config/AbstractConfig.java |  5 ++
 .../apache/kafka/common/config/ConfigDef.java   | 10 ++-
 .../apache/kafka/common/config/SslConfigs.java  |  6 +-
 .../kafka/common/config/types/Password.java | 68 
 .../kafka/common/security/ssl/SslFactory.java   | 23 +++
 .../kafka/common/config/ConfigDefTest.java  | 26 +++-
 .../common/network/SslTransportLayerTest.java   |  3 +-
 .../org/apache/kafka/test/TestSslUtils.java | 31 -
 .../main/scala/kafka/server/KafkaConfig.scala   | 12 ++--
 .../test/scala/unit/kafka/KafkaConfigTest.scala | 17 +
 10 files changed, 163 insertions(+), 38 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/kafka/blob/ab5ac264/clients/src/main/java/org/apache/kafka/common/config/AbstractConfig.java
--
diff --git 
a/clients/src/main/java/org/apache/kafka/common/config/AbstractConfig.java 
b/clients/src/main/java/org/apache/kafka/common/config/AbstractConfig.java
index 1029356..afb3b3f 100644
--- a/clients/src/main/java/org/apache/kafka/common/config/AbstractConfig.java
+++ b/clients/src/main/java/org/apache/kafka/common/config/AbstractConfig.java
@@ -16,6 +16,7 @@ import java.util.*;
 
 import org.apache.kafka.common.Configurable;
 import org.apache.kafka.common.KafkaException;
+import org.apache.kafka.common.config.types.Password;
 import org.apache.kafka.common.utils.Utils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -95,6 +96,10 @@ public class AbstractConfig {
 return (String) get(key);
 }
 
+public Password getPassword(String key) {
+return (Password) get(key);
+}
+
 public Class<?> getClass(String key) {
 return (Class<?>) get(key);
 }
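
A sketch of the behavior this adds (assuming Password exposes its raw
string via value(), which is not shown in this diff): logging or printing a
config that contains a Password no longer leaks the secret, because its
toString() yields "[hidden]" per the commit message.

    import org.apache.kafka.common.config.types.Password;

    public class PasswordLoggingSketch {
        public static void main(String[] args) {
            Password keyPassword = new Password("my-secret");
            System.out.println("ssl.key.password = " + keyPassword); // prints: ssl.key.password = [hidden]
            // The secret stays retrievable for code that needs it:
            System.out.println(keyPassword.value().length() + " characters"); // assumed accessor
        }
    }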

http://git-wip-us.apache.org/repos/asf/kafka/blob/ab5ac264/clients/src/main/java/org/apache/kafka/common/config/ConfigDef.java
--
diff --git 
a/clients/src/main/java/org/apache/kafka/common/config/ConfigDef.java 
b/clients/src/main/java/org/apache/kafka/common/config/ConfigDef.java
index 13fb829..fe7bcce 100644
--- a/clients/src/main/java/org/apache/kafka/common/config/ConfigDef.java
+++ b/clients/src/main/java/org/apache/kafka/common/config/ConfigDef.java
@@ -21,6 +21,7 @@ import java.util.List;
 import java.util.Map;
 import java.util.Set;
 
+import org.apache.kafka.common.config.types.Password;
 import org.apache.kafka.common.utils.Utils;
 
 /**
@@ -184,6 +185,13 @@ public class ConfigDef {
 return value;
 else
 throw new ConfigException(name, value, "Expected value 
to be either true or false");
+case PASSWORD:
+if (value instanceof Password)
+return value;
+else if (value instanceof String)
+return new Password(trimmed);
+else
+throw new ConfigException(name, value, "Expected value 
to be a string, but it was a " + value.getClass().getName());
 case STRING:
 if (value instanceof String)
 return trimmed;
@@ -252,7 +260,7 @@ public class ConfigDef {
  * The config types
  */
 public enum Type {
-BOOLEAN, STRING, INT, SHORT, LONG, DOUBLE, LIST, CLASS;
+BOOLEAN, STRING, INT, SHORT, LONG, DOUBLE, LIST, CLASS, PASSWORD
 }
 
 public enum Importance {

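The mechanism is a dedicated holder type whose toString() masks the secret, so any code path that logs the parsed config (the config is logged at construction, per the commit title) prints "[hidden]" instead of the password. The new org.apache.kafka.common.config.types.Password file appears in the stats above but its body is not shown in this excerpt; a minimal sketch of the idea, assuming only the behavior described in the commit message (names here are illustrative):

// Sketch only: a toString-masking secret holder in the spirit of the new
// Password type; not necessarily the committed implementation.
public class Password {
    public static final String HIDDEN = "[hidden]";

    private final String value;

    public Password(String value) {
        this.value = value;
    }

    // Accessor for code that legitimately needs the secret (e.g. SSL setup).
    public String value() {
        return value;
    }

    @Override
    public boolean equals(Object o) {
        if (this == o)
            return true;
        if (!(o instanceof Password))
            return false;
        return value.equals(((Password) o).value);
    }

    @Override
    public int hashCode() {
        return value.hashCode();
    }

    // Logging a config map stringifies every value, so the secret never
    // reaches the log output.
    @Override
    public String toString() {
        return HIDDEN;
    }
}

Callers that need the real secret go through getPassword(key).value(); everything else, including log statements, only ever sees "[hidden]".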
http://git-wip-us.apache.org/repos/asf/kafka/blob/ab5ac264/clients/src/main/java/org/apache/kafka/common/config/SslConfigs.java
--
diff --git 
a/clients/src/main/java/org/apache/kafka/common/config/SslConfigs.java 
b/clients/src/main/java/org/apache/kafka/common/config/SslConfigs.java
index d257e35..ae4667a 100644
--- 

kafka git commit: KAFKA-2690: Hide passwords while logging the config.

2015-11-12 Thread gwenshap
Repository: kafka
Updated Branches:
  refs/heads/0.9.0 98124930f -> c5965820a


KAFKA-2690: Hide passwords while logging the config.

Added a PASSWORD type in ConfigDef whose values return "[hidden]" when toString
is invoked.

Author: Jakub Nowak 

Reviewers: Ismael Juma, Gwen Shapira, Jun Rao

Closes #371 from Mszak/ssl-password-protection

(cherry picked from commit ab5ac264a71d7f895b21b4acfd93d9581dabd7c1)
Signed-off-by: Gwen Shapira 


Project: http://git-wip-us.apache.org/repos/asf/kafka/repo
Commit: http://git-wip-us.apache.org/repos/asf/kafka/commit/c5965820
Tree: http://git-wip-us.apache.org/repos/asf/kafka/tree/c5965820
Diff: http://git-wip-us.apache.org/repos/asf/kafka/diff/c5965820

Branch: refs/heads/0.9.0
Commit: c5965820a1fe81c010770106b6ca03b315272f9b
Parents: 9812493
Author: Jakub Nowak 
Authored: Thu Nov 12 10:07:04 2015 -0800
Committer: Gwen Shapira 
Committed: Thu Nov 12 10:07:17 2015 -0800

--
 .../kafka/common/config/AbstractConfig.java |  5 ++
 .../apache/kafka/common/config/ConfigDef.java   | 10 ++-
 .../apache/kafka/common/config/SslConfigs.java  |  6 +-
 .../kafka/common/config/types/Password.java | 68 
 .../kafka/common/security/ssl/SslFactory.java   | 23 +++
 .../kafka/common/config/ConfigDefTest.java  | 26 +++-
 .../common/network/SslTransportLayerTest.java   |  3 +-
 .../org/apache/kafka/test/TestSslUtils.java | 31 -
 .../main/scala/kafka/server/KafkaConfig.scala   | 12 ++--
 .../test/scala/unit/kafka/KafkaConfigTest.scala | 17 +
 10 files changed, 163 insertions(+), 38 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/kafka/blob/c5965820/clients/src/main/java/org/apache/kafka/common/config/AbstractConfig.java
--
diff --git 
a/clients/src/main/java/org/apache/kafka/common/config/AbstractConfig.java 
b/clients/src/main/java/org/apache/kafka/common/config/AbstractConfig.java
index 1029356..afb3b3f 100644
--- a/clients/src/main/java/org/apache/kafka/common/config/AbstractConfig.java
+++ b/clients/src/main/java/org/apache/kafka/common/config/AbstractConfig.java
@@ -16,6 +16,7 @@ import java.util.*;
 
 import org.apache.kafka.common.Configurable;
 import org.apache.kafka.common.KafkaException;
+import org.apache.kafka.common.config.types.Password;
 import org.apache.kafka.common.utils.Utils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -95,6 +96,10 @@ public class AbstractConfig {
 return (String) get(key);
 }
 
+public Password getPassword(String key) {
+return (Password) get(key);
+}
+
 public Class getClass(String key) {
 return (Class) get(key);
 }

http://git-wip-us.apache.org/repos/asf/kafka/blob/c5965820/clients/src/main/java/org/apache/kafka/common/config/ConfigDef.java
--
diff --git 
a/clients/src/main/java/org/apache/kafka/common/config/ConfigDef.java 
b/clients/src/main/java/org/apache/kafka/common/config/ConfigDef.java
index 13fb829..fe7bcce 100644
--- a/clients/src/main/java/org/apache/kafka/common/config/ConfigDef.java
+++ b/clients/src/main/java/org/apache/kafka/common/config/ConfigDef.java
@@ -21,6 +21,7 @@ import java.util.List;
 import java.util.Map;
 import java.util.Set;
 
+import org.apache.kafka.common.config.types.Password;
 import org.apache.kafka.common.utils.Utils;
 
 /**
@@ -184,6 +185,13 @@ public class ConfigDef {
 return value;
 else
 throw new ConfigException(name, value, "Expected value 
to be either true or false");
+case PASSWORD:
+if (value instanceof Password)
+return value;
+else if (value instanceof String)
+return new Password(trimmed);
+else
+throw new ConfigException(name, value, "Expected value 
to be a string, but it was a " + value.getClass().getName());
 case STRING:
 if (value instanceof String)
 return trimmed;
@@ -252,7 +260,7 @@ public class ConfigDef {
  * The config types
  */
 public enum Type {
-BOOLEAN, STRING, INT, SHORT, LONG, DOUBLE, LIST, CLASS;
+BOOLEAN, STRING, INT, SHORT, LONG, DOUBLE, LIST, CLASS, PASSWORD
 }
 
 public enum Importance {

http://git-wip-us.apache.org/repos/asf/kafka/blob/c5965820/clients/src/main/java/org/apache/kafka/common/config/SslConfigs.java
--
diff --git 
a/clients/src/main/java/org/apache/kafka/common/config/SslConfigs.java 

kafka git commit: KAFKA-2790: doc improvements

2015-11-11 Thread gwenshap
Repository: kafka
Updated Branches:
  refs/heads/0.9.0 0ea17e959 -> 1828b8e52


KAFKA-2790: doc improvements

Author: Gwen Shapira <csh...@gmail.com>

Reviewers: Jun Rao, Guozhang Wang

Closes #491 from gwenshap/KAFKA-2790

(cherry picked from commit a8ccdc6154a1e10982cb80df82e8661903eb9ae5)
Signed-off-by: Gwen Shapira <csh...@gmail.com>


Project: http://git-wip-us.apache.org/repos/asf/kafka/repo
Commit: http://git-wip-us.apache.org/repos/asf/kafka/commit/1828b8e5
Tree: http://git-wip-us.apache.org/repos/asf/kafka/tree/1828b8e5
Diff: http://git-wip-us.apache.org/repos/asf/kafka/diff/1828b8e5

Branch: refs/heads/0.9.0
Commit: 1828b8e52dd83a757580964d03b0148f19ef11fe
Parents: 0ea17e9
Author: Gwen Shapira <csh...@gmail.com>
Authored: Wed Nov 11 10:54:09 2015 -0800
Committer: Gwen Shapira <csh...@gmail.com>
Committed: Wed Nov 11 10:54:26 2015 -0800

--
 .../apache/kafka/common/config/ConfigDef.java   |  12 +-
 docs/api.html   |   8 +-
 docs/documentation.html |   6 +
 docs/ops.html   |   6 +-
 docs/quickstart.html|   6 +-
 docs/security.html  | 133 ++-
 docs/upgrade.html   |   3 +-
 7 files changed, 156 insertions(+), 18 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/kafka/blob/1828b8e5/clients/src/main/java/org/apache/kafka/common/config/ConfigDef.java
--
diff --git 
a/clients/src/main/java/org/apache/kafka/common/config/ConfigDef.java 
b/clients/src/main/java/org/apache/kafka/common/config/ConfigDef.java
index 2a5ebee..13fb829 100644
--- a/clients/src/main/java/org/apache/kafka/common/config/ConfigDef.java
+++ b/clients/src/main/java/org/apache/kafka/common/config/ConfigDef.java
@@ -384,14 +384,14 @@ public class ConfigDef {
 }
 });
 StringBuilder b = new StringBuilder();
-b.append("\n");
+b.append("\n");
 b.append("\n");
 b.append("Name\n");
+b.append("Description\n");
 b.append("Type\n");
 b.append("Default\n");
 b.append("Valid Values\n");
 b.append("Importance\n");
-b.append("Description\n");
 b.append("\n");
 for (ConfigKey def : configs) {
 b.append("\n");
@@ -399,6 +399,9 @@ public class ConfigDef {
 b.append(def.name);
 b.append("");
 b.append("");
+b.append(def.documentation);
+b.append("");
+b.append("");
 b.append(def.type.toString().toLowerCase());
 b.append("");
 b.append("");
@@ -418,12 +421,9 @@ public class ConfigDef {
 b.append("");
 b.append(def.importance.toString().toLowerCase());
 b.append("");
-b.append("");
-b.append(def.documentation);
-b.append("");
 b.append("\n");
 }
-b.append("");
+b.append("");
 return b.toString();
 }
 }
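With the column reorder above, the generated reference table reads Name, Description, Type, Default, Valid Values, Importance. A minimal usage sketch (the config names below are illustrative, not from this commit):

// Renders the HTML docs table for a small ConfigDef.
ConfigDef def = new ConfigDef()
    .define("bootstrap.servers", ConfigDef.Type.LIST, ConfigDef.Importance.HIGH,
            "Initial cluster contact points.")
    .define("retries", ConfigDef.Type.INT, 0, ConfigDef.Importance.LOW,
            "Number of send retries.");
System.out.println(def.toHtmlTable());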

http://git-wip-us.apache.org/repos/asf/kafka/blob/1828b8e5/docs/api.html
--
diff --git a/docs/api.html b/docs/api.html
index 835bdf2..3aad872 100644
--- a/docs/api.html
+++ b/docs/api.html
@@ -15,21 +15,21 @@
  limitations under the License.
 -->
 
-We are in the process of rewritting the JVM clients for Kafka. As of 0.8.2 
Kafka includes a newly rewritten Java producer. The next release will include 
an equivalent Java consumer. These new clients are meant to supplant the 
existing Scala clients, but for compatability they will co-exist for some time. 
These clients are available in a seperate jar with minimal dependencies, while 
the old Scala clients remain packaged with the server.
+Apache Kafka includes new java clients (in the org.apache.kafka.clients 
package). These are meant to supplant the older Scala clients, but for 
compatability they will co-exist for some time. These clients are available in 
a seperate jar with minimal dependencies, while the old Scala clients remain 
packaged with the server.
 
 2.1 Producer API
 
-As of the 0.8.2 release we encourage all new development to use the new Java 
producer. This client is production tested and generally both faster and more 
fully featured than the previous Scala client. You can use this client by 
adding a dependency on the client jar using the following example maven 
co-ordinates (you can change the ve

kafka git commit: KAFKA-2786: Only respond to SinkTask onPartitionsRevoked after the WorkerSinkTask has finished starting up.

2015-11-09 Thread gwenshap
Repository: kafka
Updated Branches:
  refs/heads/trunk bce664b42 -> 590a4616a


KAFKA-2786: Only respond to SinkTask onPartitionsRevoked after the 
WorkerSinkTask has finished starting up.

Author: Ewen Cheslack-Postava 

Reviewers: Gwen Shapira

Closes #476 from ewencp/kafka-2786-on-partitions-assigned-only-after-start


Project: http://git-wip-us.apache.org/repos/asf/kafka/repo
Commit: http://git-wip-us.apache.org/repos/asf/kafka/commit/590a4616
Tree: http://git-wip-us.apache.org/repos/asf/kafka/tree/590a4616
Diff: http://git-wip-us.apache.org/repos/asf/kafka/diff/590a4616

Branch: refs/heads/trunk
Commit: 590a4616a1030b3175ba5a548b5f1e1b49c323c2
Parents: bce664b
Author: Ewen Cheslack-Postava 
Authored: Mon Nov 9 20:43:07 2015 -0800
Committer: Gwen Shapira 
Committed: Mon Nov 9 20:43:07 2015 -0800

--
 .../org/apache/kafka/connect/runtime/WorkerSinkTask.java  | 10 ++
 1 file changed, 6 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/kafka/blob/590a4616/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/WorkerSinkTask.java
--
diff --git 
a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/WorkerSinkTask.java
 
b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/WorkerSinkTask.java
index a4d4093..643b10e 100644
--- 
a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/WorkerSinkTask.java
+++ 
b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/WorkerSinkTask.java
@@ -191,7 +191,7 @@ class WorkerSinkTask implements WorkerTask {
 try {
 task.flush(offsets);
 } catch (Throwable t) {
-log.error("Commit of {} offsets failed due to exception while 
flushing: {}", this, t);
+log.error("Commit of {} offsets failed due to exception while 
flushing:", this, t);
 log.error("Rewinding offsets to last committed offsets");
 for (Map.Entry<TopicPartition, OffsetAndMetadata> entry : lastCommittedOffsets.entrySet()) {
 log.debug("{} Rewinding topic partition {} to offset {}", id, 
entry.getKey(), entry.getValue().offset());
@@ -288,7 +288,7 @@ class WorkerSinkTask implements WorkerTask {
 pausedForRedelivery = false;
 }
 } catch (RetriableException e) {
-log.error("RetriableException from SinkTask {}: {}", id, e);
+log.error("RetriableException from SinkTask {}:", id, e);
 // If we're retrying a previous batch, make sure we've paused all 
topic partitions so we don't get new data,
 // but will still be able to poll in order to handle 
user-requested timeouts, keep group membership, etc.
 pausedForRedelivery = true;
@@ -361,8 +361,10 @@ class WorkerSinkTask implements WorkerTask {
 
 @Override
 public void onPartitionsRevoked(Collection<TopicPartition> partitions) {
-task.onPartitionsRevoked(partitions);
-commitOffsets(true, -1);
+if (started) {
+task.onPartitionsRevoked(partitions);
+commitOffsets(true, -1);
+}
 // Make sure we don't have any leftover data since offsets will be 
reset to committed positions
 messageBatch.clear();
 }
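Two details of this patch are worth spelling out. Dropping the trailing "{}" placeholder lets SLF4J treat the final Throwable argument as an exception and log its stack trace rather than its toString(). And because the consumer can trigger a rebalance during the task's very first poll(), before start() has completed, the callbacks are guarded by a started flag. The guard pattern in isolation, independent of the Connect runtime (class and method names here are illustrative):

import org.apache.kafka.clients.consumer.ConsumerRebalanceListener;
import org.apache.kafka.common.TopicPartition;

import java.util.Collection;

// Sketch: ignore rebalance callbacks until the owning task has started.
class GuardedRebalanceListener implements ConsumerRebalanceListener {
    private volatile boolean started = false;

    void markStarted() {
        started = true;
    }

    @Override
    public void onPartitionsAssigned(Collection<TopicPartition> partitions) {
        if (!started)
            return; // too early: the task cannot create writers yet
        // ... open writers for the newly assigned partitions ...
    }

    @Override
    public void onPartitionsRevoked(Collection<TopicPartition> partitions) {
        if (!started)
            return; // too early: nothing to flush or commit
        // ... flush and commit offsets for the revoked partitions ...
    }
}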



[09/26] kafka git commit: KAFKA-2774: Rename Copycat to Kafka Connect

2015-11-08 Thread gwenshap
http://git-wip-us.apache.org/repos/asf/kafka/blob/f2031d40/copycat/runtime/src/main/java/org/apache/kafka/copycat/errors/AlreadyExistsException.java
--
diff --git 
a/copycat/runtime/src/main/java/org/apache/kafka/copycat/errors/AlreadyExistsException.java
 
b/copycat/runtime/src/main/java/org/apache/kafka/copycat/errors/AlreadyExistsException.java
deleted file mode 100644
index b09cb53..000
--- 
a/copycat/runtime/src/main/java/org/apache/kafka/copycat/errors/AlreadyExistsException.java
+++ /dev/null
@@ -1,35 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- **/
-
-package org.apache.kafka.copycat.errors;
-
-/**
- * Indicates the operation tried to create an entity that already exists.
- */
-public class AlreadyExistsException extends CopycatException {
-public AlreadyExistsException(String s) {
-super(s);
-}
-
-public AlreadyExistsException(String s, Throwable throwable) {
-super(s, throwable);
-}
-
-public AlreadyExistsException(Throwable throwable) {
-super(throwable);
-}
-}

http://git-wip-us.apache.org/repos/asf/kafka/blob/f2031d40/copycat/runtime/src/main/java/org/apache/kafka/copycat/errors/NotFoundException.java
--
diff --git 
a/copycat/runtime/src/main/java/org/apache/kafka/copycat/errors/NotFoundException.java
 
b/copycat/runtime/src/main/java/org/apache/kafka/copycat/errors/NotFoundException.java
deleted file mode 100644
index a8e13a9..000
--- 
a/copycat/runtime/src/main/java/org/apache/kafka/copycat/errors/NotFoundException.java
+++ /dev/null
@@ -1,35 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- **/
-
-package org.apache.kafka.copycat.errors;
-
-/**
- * Indicates that an operation attempted to modify or delete a connector or 
task that is not present on the worker.
- */
-public class NotFoundException extends CopycatException {
-public NotFoundException(String s) {
-super(s);
-}
-
-public NotFoundException(String s, Throwable throwable) {
-super(s, throwable);
-}
-
-public NotFoundException(Throwable throwable) {
-super(throwable);
-}
-}

http://git-wip-us.apache.org/repos/asf/kafka/blob/f2031d40/copycat/runtime/src/main/java/org/apache/kafka/copycat/errors/RetriableException.java
--
diff --git 
a/copycat/runtime/src/main/java/org/apache/kafka/copycat/errors/RetriableException.java
 
b/copycat/runtime/src/main/java/org/apache/kafka/copycat/errors/RetriableException.java
deleted file mode 100644
index 75821aa..000
--- 
a/copycat/runtime/src/main/java/org/apache/kafka/copycat/errors/RetriableException.java
+++ /dev/null
@@ -1,35 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT 

[14/26] kafka git commit: KAFKA-2774: Rename Copycat to Kafka Connect

2015-11-08 Thread gwenshap
http://git-wip-us.apache.org/repos/asf/kafka/blob/417e283d/connect/runtime/src/test/java/org/apache/kafka/connect/storage/KafkaOffsetBackingStoreTest.java
--
diff --git 
a/connect/runtime/src/test/java/org/apache/kafka/connect/storage/KafkaOffsetBackingStoreTest.java
 
b/connect/runtime/src/test/java/org/apache/kafka/connect/storage/KafkaOffsetBackingStoreTest.java
new file mode 100644
index 000..4e54bf1
--- /dev/null
+++ 
b/connect/runtime/src/test/java/org/apache/kafka/connect/storage/KafkaOffsetBackingStoreTest.java
@@ -0,0 +1,357 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ **/
+
+package org.apache.kafka.connect.storage;
+
+import org.apache.kafka.clients.CommonClientConfigs;
+import org.apache.kafka.clients.consumer.ConsumerConfig;
+import org.apache.kafka.clients.consumer.ConsumerRecord;
+import org.apache.kafka.clients.producer.ProducerConfig;
+import org.apache.kafka.common.KafkaException;
+import org.apache.kafka.connect.errors.ConnectException;
+import org.apache.kafka.connect.util.Callback;
+import org.apache.kafka.connect.util.KafkaBasedLog;
+import org.easymock.Capture;
+import org.easymock.EasyMock;
+import org.easymock.IAnswer;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.powermock.api.easymock.PowerMock;
+import org.powermock.api.easymock.annotation.Mock;
+import org.powermock.core.classloader.annotations.PowerMockIgnore;
+import org.powermock.core.classloader.annotations.PrepareForTest;
+import org.powermock.modules.junit4.PowerMockRunner;
+import org.powermock.reflect.Whitebox;
+
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.Future;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+@RunWith(PowerMockRunner.class)
+@PrepareForTest(KafkaOffsetBackingStore.class)
+@PowerMockIgnore("javax.management.*")
+public class KafkaOffsetBackingStoreTest {
+private static final String TOPIC = "connect-offsets";
+private static final Map<String, String> DEFAULT_PROPS = new HashMap<>();
+static {
+DEFAULT_PROPS.put(KafkaOffsetBackingStore.OFFSET_STORAGE_TOPIC_CONFIG, 
TOPIC);
+DEFAULT_PROPS.put(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, 
"broker1:9092,broker2:9093");
+}
+private static final Map<ByteBuffer, ByteBuffer> FIRST_SET = new HashMap<>();
+static {
+FIRST_SET.put(buffer("key"), buffer("value"));
+FIRST_SET.put(null, null);
+}
+
+private static final ByteBuffer TP0_KEY = buffer("TP0KEY");
+private static final ByteBuffer TP1_KEY = buffer("TP1KEY");
+private static final ByteBuffer TP2_KEY = buffer("TP2KEY");
+private static final ByteBuffer TP0_VALUE = buffer("VAL0");
+private static final ByteBuffer TP1_VALUE = buffer("VAL1");
+private static final ByteBuffer TP2_VALUE = buffer("VAL2");
+private static final ByteBuffer TP0_VALUE_NEW = buffer("VAL0_NEW");
+private static final ByteBuffer TP1_VALUE_NEW = buffer("VAL1_NEW");
+
+@Mock
+KafkaBasedLog<byte[], byte[]> storeLog;
+private KafkaOffsetBackingStore store;
+
+private Capture<String> capturedTopic = EasyMock.newCapture();
+private Capture<Map<String, Object>> capturedProducerProps = EasyMock.newCapture();
+private Capture<Map<String, Object>> capturedConsumerProps = EasyMock.newCapture();
+private Capture<Callback<ConsumerRecord<byte[], byte[]>>> capturedConsumedCallback = EasyMock.newCapture();
+
+@Before
+public void setUp() throws Exception {
+store = 
PowerMock.createPartialMockAndInvokeDefaultConstructor(KafkaOffsetBackingStore.class,
 new String[]{"createKafkaBasedLog"});
+}
+
+@Test(expected = ConnectException.class)
+public void testMissingTopic() {
+

[22/26] kafka git commit: KAFKA-2774: Rename Copycat to Kafka Connect

2015-11-08 Thread gwenshap
http://git-wip-us.apache.org/repos/asf/kafka/blob/f2031d40/connect/json/src/main/java/org/apache/kafka/connect/json/JsonConverter.java
--
diff --git 
a/connect/json/src/main/java/org/apache/kafka/connect/json/JsonConverter.java 
b/connect/json/src/main/java/org/apache/kafka/connect/json/JsonConverter.java
new file mode 100644
index 000..5e47ad2
--- /dev/null
+++ 
b/connect/json/src/main/java/org/apache/kafka/connect/json/JsonConverter.java
@@ -0,0 +1,735 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ **/
+
+package org.apache.kafka.connect.json;
+
+import com.fasterxml.jackson.databind.JsonNode;
+import com.fasterxml.jackson.databind.node.ArrayNode;
+import com.fasterxml.jackson.databind.node.JsonNodeFactory;
+import com.fasterxml.jackson.databind.node.ObjectNode;
+import org.apache.kafka.common.cache.Cache;
+import org.apache.kafka.common.cache.LRUCache;
+import org.apache.kafka.common.cache.SynchronizedCache;
+import org.apache.kafka.common.errors.SerializationException;
+import org.apache.kafka.connect.data.*;
+import org.apache.kafka.connect.errors.DataException;
+import org.apache.kafka.connect.storage.Converter;
+
+import java.io.IOException;
+import java.math.BigDecimal;
+import java.nio.ByteBuffer;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.Map;
+
+/**
+ * Implementation of Converter that uses JSON to store schemas and objects.
+ */
+public class JsonConverter implements Converter {
+private static final String SCHEMAS_ENABLE_CONFIG = "schemas.enable";
+private static final boolean SCHEMAS_ENABLE_DEFAULT = true;
+private static final String SCHEMAS_CACHE_CONFIG = "schemas.cache.size";
+private static final int SCHEMAS_CACHE_SIZE_DEFAULT = 1000;
+
+private static final HashMap<Schema.Type, JsonToConnectTypeConverter> TO_CONNECT_CONVERTERS = new HashMap<>();
+
+private static Object checkOptionalAndDefault(Schema schema) {
+if (schema.defaultValue() != null)
+return schema.defaultValue();
+if (schema.isOptional())
+return null;
+throw new DataException("Invalid null value for required field");
+}
+
+static {
+TO_CONNECT_CONVERTERS.put(Schema.Type.BOOLEAN, new 
JsonToConnectTypeConverter() {
+@Override
+public Object convert(Schema schema, JsonNode value) {
+if (value.isNull()) return checkOptionalAndDefault(schema);
+return value.booleanValue();
+}
+});
+TO_CONNECT_CONVERTERS.put(Schema.Type.INT8, new 
JsonToConnectTypeConverter() {
+@Override
+public Object convert(Schema schema, JsonNode value) {
+if (value.isNull()) return checkOptionalAndDefault(schema);
+return (byte) value.intValue();
+}
+});
+TO_CONNECT_CONVERTERS.put(Schema.Type.INT16, new 
JsonToConnectTypeConverter() {
+@Override
+public Object convert(Schema schema, JsonNode value) {
+if (value.isNull()) return checkOptionalAndDefault(schema);
+return (short) value.intValue();
+}
+});
+TO_CONNECT_CONVERTERS.put(Schema.Type.INT32, new 
JsonToConnectTypeConverter() {
+@Override
+public Object convert(Schema schema, JsonNode value) {
+if (value.isNull()) return checkOptionalAndDefault(schema);
+return value.intValue();
+}
+});
+TO_CONNECT_CONVERTERS.put(Schema.Type.INT64, new 
JsonToConnectTypeConverter() {
+@Override
+public Object convert(Schema schema, JsonNode value) {
+if (value.isNull()) return checkOptionalAndDefault(schema);
+return value.longValue();
+}
+});
+TO_CONNECT_CONVERTERS.put(Schema.Type.FLOAT32, new 
JsonToConnectTypeConverter() {
+@Override
+public Object convert(Schema schema, JsonNode value) {
+if (value.isNull()) return checkOptionalAndDefault(schema);
+   
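The converter above is organized as a static dispatch table keyed by Schema.Type, so each primitive type's parsing logic lives in one entry and lookup is a single map get. The same structuring idea in miniature (illustrative code, not part of the commit; anonymous classes match the pre-Java-8 style of this codebase):

import java.util.HashMap;
import java.util.Map;

// Sketch: a type-keyed dispatch table, like TO_CONNECT_CONVERTERS.
class TypeDispatch {
    interface Parser {
        Object parse(String raw);
    }

    private static final Map<String, Parser> PARSERS = new HashMap<>();
    static {
        PARSERS.put("int32", new Parser() {
            public Object parse(String raw) {
                return Integer.parseInt(raw);
            }
        });
        PARSERS.put("boolean", new Parser() {
            public Object parse(String raw) {
                return Boolean.parseBoolean(raw);
            }
        });
    }

    static Object convert(String type, String raw) {
        Parser parser = PARSERS.get(type);
        if (parser == null)
            throw new IllegalArgumentException("Unknown type: " + type);
        return parser.parse(raw);
    }
}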

[18/26] kafka git commit: KAFKA-2774: Rename Copycat to Kafka Connect

2015-11-08 Thread gwenshap
http://git-wip-us.apache.org/repos/asf/kafka/blob/417e283d/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/entities/CreateConnectorRequest.java
--
diff --git 
a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/entities/CreateConnectorRequest.java
 
b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/entities/CreateConnectorRequest.java
new file mode 100644
index 000..c8e0f6f
--- /dev/null
+++ 
b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/entities/CreateConnectorRequest.java
@@ -0,0 +1,59 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ **/
+
+package org.apache.kafka.connect.runtime.rest.entities;
+
+import com.fasterxml.jackson.annotation.JsonCreator;
+import com.fasterxml.jackson.annotation.JsonProperty;
+
+import java.util.Map;
+import java.util.Objects;
+
+public class CreateConnectorRequest {
+private final String name;
+private final Map<String, String> config;
+
+@JsonCreator
+public CreateConnectorRequest(@JsonProperty("name") String name, @JsonProperty("config") Map<String, String> config) {
+this.name = name;
+this.config = config;
+}
+
+@JsonProperty
+public String name() {
+return name;
+}
+
+@JsonProperty
+public Map<String, String> config() {
+return config;
+}
+
+@Override
+public boolean equals(Object o) {
+if (this == o) return true;
+if (o == null || getClass() != o.getClass()) return false;
+CreateConnectorRequest that = (CreateConnectorRequest) o;
+return Objects.equals(name, that.name) &&
+Objects.equals(config, that.config);
+}
+
+@Override
+public int hashCode() {
+return Objects.hash(name, config);
+}
+}

http://git-wip-us.apache.org/repos/asf/kafka/blob/417e283d/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/entities/ErrorMessage.java
--
diff --git 
a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/entities/ErrorMessage.java
 
b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/entities/ErrorMessage.java
new file mode 100644
index 000..493b00d
--- /dev/null
+++ 
b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/entities/ErrorMessage.java
@@ -0,0 +1,63 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ **/
+
+package org.apache.kafka.connect.runtime.rest.entities;
+
+import com.fasterxml.jackson.annotation.JsonCreator;
+import com.fasterxml.jackson.annotation.JsonProperty;
+
+import java.util.Objects;
+
+/**
+ * Standard error format for all REST API failures. These are generated 
automatically by
+ * {@link ConnectExceptionMapper} in response to uncaught
+ * {@link ConnectException}s.
+ */
+public class ErrorMessage {
+private final int errorCode;
+private final String message;
+
+@JsonCreator
+public ErrorMessage(@JsonProperty("error_code") int errorCode, 
@JsonProperty("message") String message) {
+this.errorCode = errorCode;
+this.message = message;
+}
+
+@JsonProperty("error_code")
+public int errorCode() {
+return errorCode;
+}
+
+@JsonProperty
+public String message() {
+return message;
+}
+
+@Override
+public boolean equals(Object 
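Given the @JsonCreator/@JsonProperty annotations, Jackson maps these entities directly to and from the REST payloads. For illustration (the error values below are made up, not from this commit), serializing an ErrorMessage looks like:

import com.fasterxml.jackson.databind.ObjectMapper;

public class ErrorMessageDemo {
    public static void main(String[] args) throws Exception {
        ObjectMapper mapper = new ObjectMapper();
        // Prints something like {"error_code":404,"message":"Connector demo-sink not found"}
        System.out.println(mapper.writeValueAsString(
                new ErrorMessage(404, "Connector demo-sink not found")));
    }
}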

[11/26] kafka git commit: KAFKA-2774: Rename Copycat to Kafka Connect

2015-11-08 Thread gwenshap
http://git-wip-us.apache.org/repos/asf/kafka/blob/417e283d/copycat/api/src/test/java/org/apache/kafka/copycat/data/SchemaProjectorTest.java
--
diff --git 
a/copycat/api/src/test/java/org/apache/kafka/copycat/data/SchemaProjectorTest.java
 
b/copycat/api/src/test/java/org/apache/kafka/copycat/data/SchemaProjectorTest.java
deleted file mode 100644
index 31a6f79..000
--- 
a/copycat/api/src/test/java/org/apache/kafka/copycat/data/SchemaProjectorTest.java
+++ /dev/null
@@ -1,495 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more 
contributor license
- * agreements.  See the NOTICE file distributed with this work for additional 
information regarding
- * copyright ownership. The ASF licenses this file to You under the Apache 
License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the 
License.  You may obtain a
- * copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software 
distributed under the License
- * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY 
KIND, either express
- * or implied. See the License for the specific language governing permissions 
and limitations under
- * the License.
- **/
-
-package org.apache.kafka.copycat.data;
-
-import org.apache.kafka.copycat.data.Schema.Type;
-import org.apache.kafka.copycat.errors.DataException;
-import org.apache.kafka.copycat.errors.SchemaProjectorException;
-import org.junit.Test;
-
-import java.math.BigDecimal;
-import java.math.BigInteger;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.fail;
-
-public class SchemaProjectorTest {
-
-@Test
-public void testPrimitiveTypeProjection() throws Exception {
-Object projected;
-projected = SchemaProjector.project(Schema.BOOLEAN_SCHEMA, false, 
Schema.BOOLEAN_SCHEMA);
-assertEquals(false, projected);
-
-byte[] bytes = {(byte) 1, (byte) 2};
-projected  = SchemaProjector.project(Schema.BYTES_SCHEMA, bytes, 
Schema.BYTES_SCHEMA);
-assertEquals(bytes, projected);
-
-projected = SchemaProjector.project(Schema.STRING_SCHEMA, "abc", 
Schema.STRING_SCHEMA);
-assertEquals("abc", projected);
-
-projected = SchemaProjector.project(Schema.BOOLEAN_SCHEMA, false, 
Schema.OPTIONAL_BOOLEAN_SCHEMA);
-assertEquals(false, projected);
-
-projected  = SchemaProjector.project(Schema.BYTES_SCHEMA, bytes, 
Schema.OPTIONAL_BYTES_SCHEMA);
-assertEquals(bytes, projected);
-
-projected = SchemaProjector.project(Schema.STRING_SCHEMA, "abc", 
Schema.OPTIONAL_STRING_SCHEMA);
-assertEquals("abc", projected);
-
-try {
-SchemaProjector.project(Schema.OPTIONAL_BOOLEAN_SCHEMA, false, 
Schema.BOOLEAN_SCHEMA);
-fail("Cannot project optional schema to schema with no default 
value.");
-} catch (DataException e) {
-// expected
-}
-
-try {
-SchemaProjector.project(Schema.OPTIONAL_BYTES_SCHEMA, bytes, 
Schema.BYTES_SCHEMA);
-fail("Cannot project optional schema to schema with no default 
value.");
-} catch (DataException e) {
-// expected
-}
-
-try {
-SchemaProjector.project(Schema.OPTIONAL_STRING_SCHEMA, "abc", 
Schema.STRING_SCHEMA);
-fail("Cannot project optional schema to schema with no default 
value.");
-} catch (DataException e) {
-// expected
-}
-}
-
-@Test
-public void testNumericTypeProjection() throws Exception {
-Schema[] promotableSchemas = {Schema.INT8_SCHEMA, Schema.INT16_SCHEMA, 
Schema.INT32_SCHEMA, Schema.INT64_SCHEMA, Schema.FLOAT32_SCHEMA, 
Schema.FLOAT64_SCHEMA};
-Schema[] promotableOptionalSchemas = {Schema.OPTIONAL_INT8_SCHEMA, 
Schema.OPTIONAL_INT16_SCHEMA, Schema.OPTIONAL_INT32_SCHEMA, 
Schema.OPTIONAL_INT64_SCHEMA,
-  Schema.OPTIONAL_FLOAT32_SCHEMA, 
Schema.OPTIONAL_FLOAT64_SCHEMA};
-
-Object[] values = {(byte) 127, (short) 255, 32767, 327890L, 1.2F, 
1.2345};
-Map expectedProjected = new HashMap<>();
-expectedProjected.put(values[0], Arrays.asList((byte) 127, (short) 
127, 127, 127L, 127.F, 127.));
-expectedProjected.put(values[1], Arrays.asList((short) 255, 255, 255L, 
255.F, 255.));
-expectedProjected.put(values[2], Arrays.asList(32767, 32767L, 32767.F, 
32767.));
-expectedProjected.put(values[3], Arrays.asList(327890L, 327890.F, 
327890.));
-expectedProjected.put(values[4], Arrays.asList(1.2F, 1.2));
-expectedProjected.put(values[5], Arrays.asList(1.2345));
-
-

[19/26] kafka git commit: KAFKA-2774: Rename Copycat to Kafka Connect

2015-11-08 Thread gwenshap
http://git-wip-us.apache.org/repos/asf/kafka/blob/f2031d40/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/distributed/DistributedHerder.java
--
diff --git 
a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/distributed/DistributedHerder.java
 
b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/distributed/DistributedHerder.java
new file mode 100644
index 000..85db168
--- /dev/null
+++ 
b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/distributed/DistributedHerder.java
@@ -0,0 +1,920 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ **/
+
+package org.apache.kafka.connect.runtime.distributed;
+
+import org.apache.kafka.common.errors.WakeupException;
+import org.apache.kafka.common.config.ConfigException;
+import org.apache.kafka.common.utils.SystemTime;
+import org.apache.kafka.common.utils.Time;
+import org.apache.kafka.common.utils.Utils;
+import org.apache.kafka.connect.connector.ConnectorContext;
+import org.apache.kafka.connect.errors.AlreadyExistsException;
+import org.apache.kafka.connect.errors.ConnectException;
+import org.apache.kafka.connect.errors.NotFoundException;
+import org.apache.kafka.connect.runtime.ConnectorConfig;
+import org.apache.kafka.connect.runtime.Herder;
+import org.apache.kafka.connect.runtime.HerderConnectorContext;
+import org.apache.kafka.connect.runtime.TaskConfig;
+import org.apache.kafka.connect.runtime.Worker;
+import org.apache.kafka.connect.runtime.rest.RestServer;
+import org.apache.kafka.connect.runtime.rest.entities.ConnectorInfo;
+import org.apache.kafka.connect.runtime.rest.entities.TaskInfo;
+import org.apache.kafka.connect.sink.SinkConnector;
+import org.apache.kafka.connect.storage.KafkaConfigStorage;
+import org.apache.kafka.connect.util.Callback;
+import org.apache.kafka.connect.util.ConnectorTaskId;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.PriorityQueue;
+import java.util.Queue;
+import java.util.Set;
+import java.util.concurrent.Callable;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+/**
+ * <p>
+ * Distributed "herder" that coordinates with other workers to spread work across multiple processes.
+ * </p>
+ * <p>
+ * Under the hood, this is implemented as a group managed by Kafka's group membership facilities (i.e. the generalized
+ * group/consumer coordinator). Each instance of DistributedHerder joins the group and indicates what its current
+ * configuration state is (where it is in the configuration log). The group coordinator selects one member to take
+ * this information and assign each instance a subset of the active connectors & tasks to execute. This assignment
+ * is currently performed in a simple round-robin fashion, but this is not guaranteed -- the herder may also choose
+ * to, e.g., use a sticky assignment to avoid the usual start/stop costs associated with connectors and tasks. Once
+ * an assignment is received, the DistributedHerder simply runs its assigned connectors and tasks in a Worker.
+ * </p>
+ * <p>
+ * In addition to distributing work, the DistributedHerder uses the leader determined during the work assignment
+ * to select a leader for this generation of the group who is responsible for other tasks that can only be performed
+ * by a single node at a time. Most importantly, this includes writing updated configurations for connectors and tasks
+ * (and therefore, also for creating, destroying, and scaling up/down connectors).
+ * </p>
+ */
+public class DistributedHerder implements Herder, Runnable {
+private static final Logger log = 
LoggerFactory.getLogger(DistributedHerder.class);
+
+private static 

[17/26] kafka git commit: KAFKA-2774: Rename Copycat to Kafka Connect

2015-11-08 Thread gwenshap
http://git-wip-us.apache.org/repos/asf/kafka/blob/417e283d/connect/runtime/src/main/java/org/apache/kafka/connect/storage/KafkaOffsetBackingStore.java
--
diff --git 
a/connect/runtime/src/main/java/org/apache/kafka/connect/storage/KafkaOffsetBackingStore.java
 
b/connect/runtime/src/main/java/org/apache/kafka/connect/storage/KafkaOffsetBackingStore.java
new file mode 100644
index 000..65bd9d0
--- /dev/null
+++ 
b/connect/runtime/src/main/java/org/apache/kafka/connect/storage/KafkaOffsetBackingStore.java
@@ -0,0 +1,213 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ **/
+
+package org.apache.kafka.connect.storage;
+
+import org.apache.kafka.clients.consumer.ConsumerConfig;
+import org.apache.kafka.clients.consumer.ConsumerRecord;
+import org.apache.kafka.clients.producer.ProducerConfig;
+import org.apache.kafka.clients.producer.RecordMetadata;
+import org.apache.kafka.common.utils.SystemTime;
+import org.apache.kafka.connect.errors.ConnectException;
+import org.apache.kafka.connect.util.Callback;
+import org.apache.kafka.connect.util.ConvertingFutureCallback;
+import org.apache.kafka.connect.util.KafkaBasedLog;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.nio.ByteBuffer;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.Future;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
+
+/**
+ * <p>
+ * Implementation of OffsetBackingStore that uses a Kafka topic to store offset data.
+ * </p>
+ * <p>
+ * Internally, this implementation both produces to and consumes from a Kafka topic which stores the offsets.
+ * It accepts producer and consumer overrides via its configuration but forces some settings to specific values
+ * to ensure correct behavior (e.g. acks, auto.offset.reset).
+ * </p>
+ */
+public class KafkaOffsetBackingStore implements OffsetBackingStore {
+private static final Logger log = 
LoggerFactory.getLogger(KafkaOffsetBackingStore.class);
+
+public final static String OFFSET_STORAGE_TOPIC_CONFIG = 
"offset.storage.topic";
+
+private KafkaBasedLog<byte[], byte[]> offsetLog;
+private HashMap<ByteBuffer, ByteBuffer> data;
+
+@Override
+public void configure(Map<String, ?> configs) {
+String topic = (String) configs.get(OFFSET_STORAGE_TOPIC_CONFIG);
+if (topic == null)
+throw new ConnectException("Offset storage topic must be 
specified");
+
+data = new HashMap<>();
+
+Map<String, Object> producerProps = new HashMap<>();
+producerProps.putAll(configs);
+producerProps.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, 
"org.apache.kafka.common.serialization.ByteArraySerializer");
+producerProps.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, 
"org.apache.kafka.common.serialization.ByteArraySerializer");
+producerProps.put(ProducerConfig.ACKS_CONFIG, "all");
+
+Map<String, Object> consumerProps = new HashMap<>();
+consumerProps.putAll(configs);
+consumerProps.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, 
"org.apache.kafka.common.serialization.ByteArrayDeserializer");
+consumerProps.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, 
"org.apache.kafka.common.serialization.ByteArrayDeserializer");
+consumerProps.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, false);
+
+offsetLog = createKafkaBasedLog(topic, producerProps, consumerProps, 
consumedCallback);
+}
+
+@Override
+public void start() {
+log.info("Starting KafkaOffsetBackingStore");
+offsetLog.start();
+log.info("Finished reading offsets topic and starting 
KafkaOffsetBackingStore");
+}
+
+@Override
+public void stop() {
+log.info("Stopping KafkaOffsetBackingStore");
+offsetLog.stop();
+log.info("Stopped KafkaOffsetBackingStore");
+}
+
+@Override
+public Future> get(final 
Collection keys,
+   final 
Callback> 

[12/26] kafka git commit: KAFKA-2774: Rename Copycat to Kafka Connect

2015-11-08 Thread gwenshap
http://git-wip-us.apache.org/repos/asf/kafka/blob/417e283d/copycat/api/src/main/java/org/apache/kafka/copycat/sink/SinkTask.java
--
diff --git 
a/copycat/api/src/main/java/org/apache/kafka/copycat/sink/SinkTask.java 
b/copycat/api/src/main/java/org/apache/kafka/copycat/sink/SinkTask.java
deleted file mode 100644
index 90651ed..000
--- a/copycat/api/src/main/java/org/apache/kafka/copycat/sink/SinkTask.java
+++ /dev/null
@@ -1,107 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- **/
-package org.apache.kafka.copycat.sink;
-
-import org.apache.kafka.clients.consumer.OffsetAndMetadata;
-import org.apache.kafka.common.TopicPartition;
-import org.apache.kafka.common.annotation.InterfaceStability;
-import org.apache.kafka.copycat.connector.Task;
-
-import java.util.Collection;
-import java.util.Map;
-
-/**
- * SinkTask is a Task that takes records loaded from Kafka and sends them to another system. In
- * addition to the basic {@link #put} interface, SinkTasks must also implement 
{@link #flush}
- * to support offset commits.
- */
-@InterfaceStability.Unstable
-public abstract class SinkTask implements Task {
-
-/**
- * 
- * The configuration key that provides the list of topics that are inputs 
for this
- * SinkTask.
- * 
- */
-public static final String TOPICS_CONFIG = "topics";
-
-protected SinkTaskContext context;
-
-public void initialize(SinkTaskContext context) {
-this.context = context;
-}
-
-/**
- * Start the Task. This should handle any configuration parsing and 
one-time setup of the task.
- * @param props initial configuration
- */
-@Override
-public abstract void start(Map<String, String> props);
-
-/**
- * Put the records in the sink. Usually this should send the records to 
the sink asynchronously
- * and immediately return.
- *
- * If this operation fails, the SinkTask may throw a {@link 
org.apache.kafka.copycat.errors.RetriableException} to
- * indicate that the framework should attempt to retry the same call 
again. Other exceptions will cause the task to
- * be stopped immediately. {@link SinkTaskContext#timeout(long)} can be 
used to set the maximum time before the
- * batch will be retried.
- *
- * @param records the set of records to send
- */
-public abstract void put(Collection<SinkRecord> records);
-
-/**
- * Flush all records that have been {@link #put} for the specified 
topic-partitions. The
- * offsets are provided for convenience, but could also be determined by 
tracking all offsets
- * included in the SinkRecords passed to {@link #put}.
- *
- * @param offsets mapping of TopicPartition to committed offset
- */
-public abstract void flush(Map<TopicPartition, OffsetAndMetadata> offsets);
-
-/**
- * The SinkTask uses this method to create writers for newly assigned 
partitions in case of partition
- * re-assignment. In partition re-assignment, some new partitions may be 
assigned to the SinkTask.
- * The SinkTask needs to create writers and perform necessary recovery for 
the newly assigned partitions.
- * This method will be called after partition re-assignment completes and 
before the SinkTask starts
- * fetching data.
- * @param partitions The list of partitions that are now assigned to the 
task (may include
- *   partitions previously assigned to the task)
- */
-public void onPartitionsAssigned(Collection<TopicPartition> partitions) {
-}
-
-/**
- * The SinkTask uses this method to close writers and commit offsets for partitions that are no
- * longer assigned to the SinkTask. This method will be called before a 
rebalance operation starts
- * and after the SinkTask stops fetching data.
- * @param partitions The list of partitions that were assigned to the 
consumer on the last
- *   rebalance
- */
-public void onPartitionsRevoked(Collection<TopicPartition> partitions) {
-}
-
-/**
- * Perform any cleanup to stop this task. In SinkTasks, this method is 
invoked only once outstanding calls to other
- * methods have completed (e.g., 
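The contract described above (put hands records to the sink, usually asynchronously; flush makes everything durable so offsets can be committed) is easiest to see in a toy implementation. A minimal sketch against this copycat-era API (the class is illustrative; version() comes from the Task interface):

import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.copycat.sink.SinkRecord;
import org.apache.kafka.copycat.sink.SinkTask;

import java.util.Collection;
import java.util.Map;

// Sketch: a sink that "writes" records to stdout.
public class StdoutSinkTask extends SinkTask {
    @Override
    public void start(Map<String, String> props) {
        // one-time setup; nothing to configure for stdout
    }

    @Override
    public void put(Collection<SinkRecord> records) {
        for (SinkRecord record : records)
            System.out.println(record.value()); // hand records to the sink
    }

    @Override
    public void flush(Map<TopicPartition, OffsetAndMetadata> offsets) {
        System.out.flush(); // once this returns, committing offsets is safe
    }

    @Override
    public void stop() {
        // no outstanding resources to release
    }

    @Override
    public String version() {
        return "0.1"; // required by the Task interface
    }
}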

[16/26] kafka git commit: KAFKA-2774: Rename Copycat to Kafka Connect

2015-11-08 Thread gwenshap
http://git-wip-us.apache.org/repos/asf/kafka/blob/f2031d40/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/WorkerSinkTaskThreadedTest.java
--
diff --git 
a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/WorkerSinkTaskThreadedTest.java
 
b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/WorkerSinkTaskThreadedTest.java
new file mode 100644
index 000..6915631
--- /dev/null
+++ 
b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/WorkerSinkTaskThreadedTest.java
@@ -0,0 +1,563 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ **/
+
+package org.apache.kafka.connect.runtime;
+
+import org.apache.kafka.clients.consumer.ConsumerRebalanceListener;
+import org.apache.kafka.clients.consumer.ConsumerRecord;
+import org.apache.kafka.clients.consumer.ConsumerRecords;
+import org.apache.kafka.clients.consumer.KafkaConsumer;
+import org.apache.kafka.clients.consumer.OffsetAndMetadata;
+import org.apache.kafka.clients.consumer.OffsetCommitCallback;
+import org.apache.kafka.common.TopicPartition;
+import org.apache.kafka.common.utils.Time;
+import org.apache.kafka.connect.data.Schema;
+import org.apache.kafka.connect.data.SchemaAndValue;
+import org.apache.kafka.connect.errors.ConnectException;
+import org.apache.kafka.connect.runtime.standalone.StandaloneConfig;
+import org.apache.kafka.connect.sink.SinkConnector;
+import org.apache.kafka.connect.sink.SinkRecord;
+import org.apache.kafka.connect.sink.SinkTask;
+import org.apache.kafka.connect.storage.Converter;
+import org.apache.kafka.connect.util.ConnectorTaskId;
+import org.apache.kafka.connect.util.MockTime;
+import org.apache.kafka.connect.util.ThreadedTest;
+import org.easymock.Capture;
+import org.easymock.CaptureType;
+import org.easymock.EasyMock;
+import org.easymock.IAnswer;
+import org.easymock.IExpectationSetters;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.powermock.api.easymock.PowerMock;
+import org.powermock.api.easymock.annotation.Mock;
+import org.powermock.core.classloader.annotations.PowerMockIgnore;
+import org.powermock.core.classloader.annotations.PrepareForTest;
+import org.powermock.modules.junit4.PowerMockRunner;
+import org.powermock.reflect.Whitebox;
+
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.concurrent.TimeUnit;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.fail;
+
+@RunWith(PowerMockRunner.class)
+@PrepareForTest(WorkerSinkTask.class)
+@PowerMockIgnore("javax.management.*")
+public class WorkerSinkTaskThreadedTest extends ThreadedTest {
+
+// These are fixed to keep this code simpler. In this example we assume 
byte[] raw values
+// with a mix of integer/string values in Connect
+private static final String TOPIC = "test";
+private static final int PARTITION = 12;
+private static final int PARTITION2 = 13;
+private static final int PARTITION3 = 14;
+private static final long FIRST_OFFSET = 45;
+private static final Schema KEY_SCHEMA = Schema.INT32_SCHEMA;
+private static final int KEY = 12;
+private static final Schema VALUE_SCHEMA = Schema.STRING_SCHEMA;
+private static final String VALUE = "VALUE";
+private static final byte[] RAW_KEY = "key".getBytes();
+private static final byte[] RAW_VALUE = "value".getBytes();
+
+private static final TopicPartition TOPIC_PARTITION = new 
TopicPartition(TOPIC, PARTITION);
+private static final TopicPartition TOPIC_PARTITION2 = new 
TopicPartition(TOPIC, PARTITION2);
+private static final TopicPartition TOPIC_PARTITION3 = new 
TopicPartition(TOPIC, PARTITION3);
+private static final TopicPartition UNASSIGNED_TOPIC_PARTITION = new 
TopicPartition(TOPIC, 200);
+
+private static final Map<String, String> TASK_PROPS = new HashMap<>();
+static {
+TASK_PROPS.put(SinkConnector.TOPICS_CONFIG, TOPIC);
+}
+
+private ConnectorTaskId taskId = new ConnectorTaskId("job", 0);
+private Time time;
+@Mock private SinkTask sinkTask;
+private Capture 

[19/26] kafka git commit: KAFKA-2774: Rename Copycat to Kafka Connect

2015-11-08 Thread gwenshap
http://git-wip-us.apache.org/repos/asf/kafka/blob/417e283d/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/distributed/DistributedHerder.java
--
diff --git 
a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/distributed/DistributedHerder.java
 
b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/distributed/DistributedHerder.java
new file mode 100644
index 000..85db168
--- /dev/null
+++ 
b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/distributed/DistributedHerder.java
@@ -0,0 +1,920 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ **/
+
+package org.apache.kafka.connect.runtime.distributed;
+
+import org.apache.kafka.common.errors.WakeupException;
+import org.apache.kafka.common.config.ConfigException;
+import org.apache.kafka.common.utils.SystemTime;
+import org.apache.kafka.common.utils.Time;
+import org.apache.kafka.common.utils.Utils;
+import org.apache.kafka.connect.connector.ConnectorContext;
+import org.apache.kafka.connect.errors.AlreadyExistsException;
+import org.apache.kafka.connect.errors.ConnectException;
+import org.apache.kafka.connect.errors.NotFoundException;
+import org.apache.kafka.connect.runtime.ConnectorConfig;
+import org.apache.kafka.connect.runtime.Herder;
+import org.apache.kafka.connect.runtime.HerderConnectorContext;
+import org.apache.kafka.connect.runtime.TaskConfig;
+import org.apache.kafka.connect.runtime.Worker;
+import org.apache.kafka.connect.runtime.rest.RestServer;
+import org.apache.kafka.connect.runtime.rest.entities.ConnectorInfo;
+import org.apache.kafka.connect.runtime.rest.entities.TaskInfo;
+import org.apache.kafka.connect.sink.SinkConnector;
+import org.apache.kafka.connect.storage.KafkaConfigStorage;
+import org.apache.kafka.connect.util.Callback;
+import org.apache.kafka.connect.util.ConnectorTaskId;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.PriorityQueue;
+import java.util.Queue;
+import java.util.Set;
+import java.util.concurrent.Callable;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+/**
+ * 
+ * Distributed "herder" that coordinates with other workers to spread work 
across multiple processes.
+ * 
+ * 
+ * Under the hood, this is implemented as a group managed by Kafka's group 
membership facilities (i.e. the generalized
+ * group/consumer coordinator). Each instance of DistributedHerder joins 
the group and indicates what its current
+ * configuration state is (where it is in the configuration log). The 
group coordinator selects one member to take
+ * this information and assign each instance a subset of the active 
connectors & tasks to execute. This assignment
+ * is currently performed in a simple round-robin fashion, but this is not 
guaranteed -- the herder may also choose
+ * to, e.g., use a sticky assignment to avoid the usual start/stop costs 
associated with connectors and tasks. Once
+ * an assignment is received, the DistributedHerder simply runs its 
assigned connectors and tasks in a Worker.
+ * 
+ * 
+ * In addition to distributing work, the DistributedHerder uses the leader 
determined during the work assignment
+ * to select a leader for this generation of the group who is responsible 
for other tasks that can only be performed
+ * by a single node at a time. Most importantly, this includes writing 
updated configurations for connectors and tasks,
+ * (and therefore, also for creating, destroying, and scaling up/down 
connectors).
+ * 
+ */
+public class DistributedHerder implements Herder, Runnable {
+private static final Logger log = 
LoggerFactory.getLogger(DistributedHerder.class);
+
+private static 

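The round-robin assignment described in the DistributedHerder javadoc above is easy to illustrate in isolation. The following standalone Java snippet is an editorial sketch only, not code from this patch; the worker and connector names are made up, and the real herder distributes ConnectorTaskId objects through the group protocol rather than plain strings.

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    public class RoundRobinAssignmentSketch {
        public static void main(String[] args) {
            // Hypothetical workers and work items (connectors plus their tasks).
            List<String> workers = Arrays.asList("worker-1", "worker-2", "worker-3");
            List<String> work = Arrays.asList("conn-a", "conn-a/task-0", "conn-a/task-1",
                    "conn-b", "conn-b/task-0");

            // Deal the work out one item at a time, cycling through the workers.
            Map<String, List<String>> assignment = new HashMap<>();
            for (String worker : workers)
                assignment.put(worker, new ArrayList<String>());
            int i = 0;
            for (String item : work)
                assignment.get(workers.get(i++ % workers.size())).add(item);

            // e.g. {worker-1=[conn-a, conn-b], worker-2=[conn-a/task-0, conn-b/task-0],
            //       worker-3=[conn-a/task-1]}
            System.out.println(assignment);
        }
    }

As the javadoc notes, this scheme is not guaranteed; a sticky assignment could replace it to avoid the start/stop costs of moving connectors and tasks on every rebalance.
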
[21/26] kafka git commit: KAFKA-2774: Rename Copycat to Kafka Connect

2015-11-08 Thread gwenshap
http://git-wip-us.apache.org/repos/asf/kafka/blob/417e283d/connect/runtime/src/main/java/org/apache/kafka/connect/errors/AlreadyExistsException.java
--
diff --git 
a/connect/runtime/src/main/java/org/apache/kafka/connect/errors/AlreadyExistsException.java
 
b/connect/runtime/src/main/java/org/apache/kafka/connect/errors/AlreadyExistsException.java
new file mode 100644
index 000..6fdefdf
--- /dev/null
+++ 
b/connect/runtime/src/main/java/org/apache/kafka/connect/errors/AlreadyExistsException.java
@@ -0,0 +1,35 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ **/
+
+package org.apache.kafka.connect.errors;
+
+/**
+ * Indicates the operation tried to create an entity that already exists.
+ */
+public class AlreadyExistsException extends ConnectException {
+public AlreadyExistsException(String s) {
+super(s);
+}
+
+public AlreadyExistsException(String s, Throwable throwable) {
+super(s, throwable);
+}
+
+public AlreadyExistsException(Throwable throwable) {
+super(throwable);
+}
+}

http://git-wip-us.apache.org/repos/asf/kafka/blob/417e283d/connect/runtime/src/main/java/org/apache/kafka/connect/errors/NotFoundException.java
--
diff --git 
a/connect/runtime/src/main/java/org/apache/kafka/connect/errors/NotFoundException.java
 
b/connect/runtime/src/main/java/org/apache/kafka/connect/errors/NotFoundException.java
new file mode 100644
index 000..a3bbe91
--- /dev/null
+++ 
b/connect/runtime/src/main/java/org/apache/kafka/connect/errors/NotFoundException.java
@@ -0,0 +1,35 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ **/
+
+package org.apache.kafka.connect.errors;
+
+/**
+ * Indicates that an operation attempted to modify or delete a connector or 
task that is not present on the worker.
+ */
+public class NotFoundException extends ConnectException {
+public NotFoundException(String s) {
+super(s);
+}
+
+public NotFoundException(String s, Throwable throwable) {
+super(s, throwable);
+}
+
+public NotFoundException(Throwable throwable) {
+super(throwable);
+}
+}

http://git-wip-us.apache.org/repos/asf/kafka/blob/417e283d/connect/runtime/src/main/java/org/apache/kafka/connect/errors/RetriableException.java
--
diff --git 
a/connect/runtime/src/main/java/org/apache/kafka/connect/errors/RetriableException.java
 
b/connect/runtime/src/main/java/org/apache/kafka/connect/errors/RetriableException.java
new file mode 100644
index 000..1b5b07a
--- /dev/null
+++ 
b/connect/runtime/src/main/java/org/apache/kafka/connect/errors/RetriableException.java
@@ -0,0 +1,35 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR 

[24/26] kafka git commit: KAFKA-2774: Rename Copycat to Kafka Connect

2015-11-08 Thread gwenshap
http://git-wip-us.apache.org/repos/asf/kafka/blob/417e283d/connect/api/src/main/java/org/apache/kafka/connect/source/SourceRecord.java
--
diff --git 
a/connect/api/src/main/java/org/apache/kafka/connect/source/SourceRecord.java 
b/connect/api/src/main/java/org/apache/kafka/connect/source/SourceRecord.java
new file mode 100644
index 000..1890062
--- /dev/null
+++ 
b/connect/api/src/main/java/org/apache/kafka/connect/source/SourceRecord.java
@@ -0,0 +1,109 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ **/
+
+package org.apache.kafka.connect.source;
+
+import org.apache.kafka.common.annotation.InterfaceStability;
+import org.apache.kafka.connect.connector.ConnectRecord;
+import org.apache.kafka.connect.data.Schema;
+
+import java.util.Map;
+
+/**
+ * 
+ * SourceRecords are generated by SourceTasks and passed to Kafka Connect for 
storage in
+ * Kafka. In addition to the standard fields in {@link ConnectRecord} which 
specify where data is stored
+ * in Kafka, they also include a sourcePartition and sourceOffset.
+ * 
+ * 
+ * The sourcePartition represents a single input sourcePartition that the 
record came from (e.g. a filename, table
+ * name, or topic-partition). The sourceOffset represents a position in that 
sourcePartition which can be used
+ * to resume consumption of data.
+ * 
+ * 
+ * These values can have arbitrary structure and should be represented using
+ * org.apache.kafka.connect.data objects (or primitive values). For example, a 
database connector
+ * might specify the sourcePartition as a record containing { "db": 
"database_name", "table":
+ * "table_name"} and the sourceOffset as a Long containing the timestamp of 
the row.
+ * 
+ */
+@InterfaceStability.Unstable
+public class SourceRecord extends ConnectRecord {
+private final Map<String, ?> sourcePartition;
+private final Map<String, ?> sourceOffset;
+
+public SourceRecord(Map<String, ?> sourcePartition, Map<String, ?> sourceOffset,
+String topic, Integer partition, Schema valueSchema, Object value) {
+this(sourcePartition, sourceOffset, topic, partition, null, null, 
valueSchema, value);
+}
+
+public SourceRecord(Map<String, ?> sourcePartition, Map<String, ?> sourceOffset,
+String topic, Schema valueSchema, Object value) {
+this(sourcePartition, sourceOffset, topic, null, null, null, 
valueSchema, value);
+}
+
+public SourceRecord(Map<String, ?> sourcePartition, Map<String, ?> sourceOffset,
+String topic, Integer partition,
+Schema keySchema, Object key, Schema valueSchema, 
Object value) {
+super(topic, partition, keySchema, key, valueSchema, value);
+this.sourcePartition = sourcePartition;
+this.sourceOffset = sourceOffset;
+}
+
+public Map<String, ?> sourcePartition() {
+return sourcePartition;
+}
+
+public Map<String, ?> sourceOffset() {
+return sourceOffset;
+}
+
+@Override
+public boolean equals(Object o) {
+if (this == o)
+return true;
+if (o == null || getClass() != o.getClass())
+return false;
+if (!super.equals(o))
+return false;
+
+SourceRecord that = (SourceRecord) o;
+
+if (sourcePartition != null ? 
!sourcePartition.equals(that.sourcePartition) : that.sourcePartition != null)
+return false;
+if (sourceOffset != null ? !sourceOffset.equals(that.sourceOffset) : 
that.sourceOffset != null)
+return false;
+
+return true;
+}
+
+@Override
+public int hashCode() {
+int result = super.hashCode();
+result = 31 * result + (sourcePartition != null ? 
sourcePartition.hashCode() : 0);
+result = 31 * result + (sourceOffset != null ? sourceOffset.hashCode() 
: 0);
+return result;
+}
+
+@Override
+public String toString() {
+return "SourceRecord{" +
+"sourcePartition=" + sourcePartition +
+", sourceOffset=" + sourceOffset +
+"} " + super.toString();
+}
+}

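To make the javadoc's database example concrete, here is a minimal editorial sketch of building a SourceRecord for a row read from a table. It is not code from this patch; the "db"/"table"/"timestamp" keys, the topic name, and the payload are illustrative assumptions.

    import org.apache.kafka.connect.data.Schema;
    import org.apache.kafka.connect.source.SourceRecord;

    import java.util.Collections;
    import java.util.HashMap;
    import java.util.Map;

    public class SourceRecordSketch {
        public static void main(String[] args) {
            // Which input this record came from (stable across restarts).
            Map<String, String> sourcePartition = new HashMap<>();
            sourcePartition.put("db", "database_name");
            sourcePartition.put("table", "table_name");

            // Position within that input; Connect stores it so the task can resume here.
            Map<String, Long> sourceOffset = Collections.singletonMap("timestamp", 1446940800000L);

            SourceRecord record = new SourceRecord(sourcePartition, sourceOffset,
                    "db-topic", Schema.STRING_SCHEMA, "row payload");
            System.out.println(record.sourcePartition() + " @ " + record.sourceOffset());
        }
    }
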

[08/26] kafka git commit: KAFKA-2774: Rename Copycat to Kafka Connect

2015-11-08 Thread gwenshap
http://git-wip-us.apache.org/repos/asf/kafka/blob/417e283d/copycat/runtime/src/main/java/org/apache/kafka/copycat/runtime/WorkerSourceTask.java
--
diff --git 
a/copycat/runtime/src/main/java/org/apache/kafka/copycat/runtime/WorkerSourceTask.java
 
b/copycat/runtime/src/main/java/org/apache/kafka/copycat/runtime/WorkerSourceTask.java
deleted file mode 100644
index 6577fe9..000
--- 
a/copycat/runtime/src/main/java/org/apache/kafka/copycat/runtime/WorkerSourceTask.java
+++ /dev/null
@@ -1,339 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- * 
- * http://www.apache.org/licenses/LICENSE-2.0
- * 
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- **/
-
-package org.apache.kafka.copycat.runtime;
-
-import org.apache.kafka.common.utils.Time;
-import org.apache.kafka.clients.producer.Callback;
-import org.apache.kafka.clients.producer.KafkaProducer;
-import org.apache.kafka.clients.producer.ProducerRecord;
-import org.apache.kafka.clients.producer.RecordMetadata;
-import org.apache.kafka.copycat.source.SourceRecord;
-import org.apache.kafka.copycat.source.SourceTask;
-import org.apache.kafka.copycat.storage.Converter;
-import org.apache.kafka.copycat.storage.OffsetStorageReader;
-import org.apache.kafka.copycat.storage.OffsetStorageWriter;
-import org.apache.kafka.copycat.util.ConnectorTaskId;
-import org.apache.kafka.copycat.util.ShutdownableThread;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.util.IdentityHashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.Future;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.TimeoutException;
-
-/**
- * WorkerTask that uses a SourceTask to ingest data into Kafka.
- */
-class WorkerSourceTask implements WorkerTask {
-private static final Logger log = 
LoggerFactory.getLogger(WorkerSourceTask.class);
-
-private final ConnectorTaskId id;
-private final SourceTask task;
-private final Converter keyConverter;
-private final Converter valueConverter;
-private KafkaProducer<byte[], byte[]> producer;
-private WorkerSourceTaskThread workThread;
-private final OffsetStorageReader offsetReader;
-private final OffsetStorageWriter offsetWriter;
-private final WorkerConfig workerConfig;
-private final Time time;
-
-// Use IdentityHashMap to ensure correctness with duplicate records. This 
is a HashMap because
-// there is no IdentityHashSet.
-private IdentityHashMap<ProducerRecord<byte[], byte[]>, ProducerRecord<byte[], byte[]>> outstandingMessages;
-// A second buffer is used while an offset flush is running
-private IdentityHashMap<ProducerRecord<byte[], byte[]>, ProducerRecord<byte[], byte[]>> outstandingMessagesBacklog;
-private boolean flushing;
-
-public WorkerSourceTask(ConnectorTaskId id, SourceTask task,
-Converter keyConverter, Converter valueConverter,
-KafkaProducer<byte[], byte[]> producer,
-OffsetStorageReader offsetReader, 
OffsetStorageWriter offsetWriter,
-WorkerConfig workerConfig, Time time) {
-this.id = id;
-this.task = task;
-this.keyConverter = keyConverter;
-this.valueConverter = valueConverter;
-this.producer = producer;
-this.offsetReader = offsetReader;
-this.offsetWriter = offsetWriter;
-this.workerConfig = workerConfig;
-this.time = time;
-
-this.outstandingMessages = new IdentityHashMap<>();
-this.outstandingMessagesBacklog = new IdentityHashMap<>();
-this.flushing = false;
-}
-
-@Override
-public void start(Map<String, String> props) {
-workThread = new WorkerSourceTaskThread("WorkerSourceTask-" + id, 
props);
-workThread.start();
-}
-
-@Override
-public void stop() {
-if (workThread != null)
-workThread.startGracefulShutdown();
-}
-
-@Override
-public boolean awaitStop(long timeoutMs) {
-boolean success = true;
-if (workThread != null) {
-try {
-success = workThread.awaitShutdown(timeoutMs, 
TimeUnit.MILLISECONDS);

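A side note on the IdentityHashMap comment in the constructor above: the JDK indeed has no IdentityHashSet class, but when only set semantics are needed, Collections.newSetFromMap can derive one from an IdentityHashMap. A small standalone sketch follows (editorial illustration, not code from this patch; WorkerSourceTask keeps a full map because it stores each in-flight ProducerRecord against itself rather than bare membership):

    import java.util.Collections;
    import java.util.IdentityHashMap;
    import java.util.Set;

    public class IdentitySetSketch {
        public static void main(String[] args) {
            // Membership is decided by reference identity (==), not equals().
            Set<String> seen = Collections.newSetFromMap(new IdentityHashMap<String, Boolean>());
            String a = new String("record");
            String b = new String("record"); // equal content, distinct instance
            seen.add(a);
            System.out.println(seen.contains(a)); // true
            System.out.println(seen.contains(b)); // false: different object
        }
    }
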
[02/26] kafka git commit: KAFKA-2774: Rename Copycat to Kafka Connect

2015-11-08 Thread gwenshap
http://git-wip-us.apache.org/repos/asf/kafka/blob/417e283d/copycat/runtime/src/test/java/org/apache/kafka/copycat/storage/KafkaOffsetBackingStoreTest.java
--
diff --git 
a/copycat/runtime/src/test/java/org/apache/kafka/copycat/storage/KafkaOffsetBackingStoreTest.java
 
b/copycat/runtime/src/test/java/org/apache/kafka/copycat/storage/KafkaOffsetBackingStoreTest.java
deleted file mode 100644
index 69d9ab4..000
--- 
a/copycat/runtime/src/test/java/org/apache/kafka/copycat/storage/KafkaOffsetBackingStoreTest.java
+++ /dev/null
@@ -1,357 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- **/
-
-package org.apache.kafka.copycat.storage;
-
-import org.apache.kafka.clients.CommonClientConfigs;
-import org.apache.kafka.clients.consumer.ConsumerConfig;
-import org.apache.kafka.clients.consumer.ConsumerRecord;
-import org.apache.kafka.clients.producer.ProducerConfig;
-import org.apache.kafka.common.KafkaException;
-import org.apache.kafka.copycat.errors.CopycatException;
-import org.apache.kafka.copycat.util.Callback;
-import org.apache.kafka.copycat.util.KafkaBasedLog;
-import org.easymock.Capture;
-import org.easymock.EasyMock;
-import org.easymock.IAnswer;
-import org.junit.Before;
-import org.junit.Test;
-import org.junit.runner.RunWith;
-import org.powermock.api.easymock.PowerMock;
-import org.powermock.api.easymock.annotation.Mock;
-import org.powermock.core.classloader.annotations.PowerMockIgnore;
-import org.powermock.core.classloader.annotations.PrepareForTest;
-import org.powermock.modules.junit4.PowerMockRunner;
-import org.powermock.reflect.Whitebox;
-
-import java.nio.ByteBuffer;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.Future;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicBoolean;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-
-@RunWith(PowerMockRunner.class)
-@PrepareForTest(KafkaOffsetBackingStore.class)
-@PowerMockIgnore("javax.management.*")
-public class KafkaOffsetBackingStoreTest {
-private static final String TOPIC = "copycat-offsets";
-private static final Map<String, String> DEFAULT_PROPS = new HashMap<>();
-static {
-DEFAULT_PROPS.put(KafkaOffsetBackingStore.OFFSET_STORAGE_TOPIC_CONFIG, 
TOPIC);
-DEFAULT_PROPS.put(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, 
"broker1:9092,broker2:9093");
-}
-private static final Map<ByteBuffer, ByteBuffer> FIRST_SET = new HashMap<>();
-static {
-FIRST_SET.put(buffer("key"), buffer("value"));
-FIRST_SET.put(null, null);
-}
-
-private static final ByteBuffer TP0_KEY = buffer("TP0KEY");
-private static final ByteBuffer TP1_KEY = buffer("TP1KEY");
-private static final ByteBuffer TP2_KEY = buffer("TP2KEY");
-private static final ByteBuffer TP0_VALUE = buffer("VAL0");
-private static final ByteBuffer TP1_VALUE = buffer("VAL1");
-private static final ByteBuffer TP2_VALUE = buffer("VAL2");
-private static final ByteBuffer TP0_VALUE_NEW = buffer("VAL0_NEW");
-private static final ByteBuffer TP1_VALUE_NEW = buffer("VAL1_NEW");
-
-@Mock
-KafkaBasedLog<byte[], byte[]> storeLog;
-private KafkaOffsetBackingStore store;
-
-private Capture<String> capturedTopic = EasyMock.newCapture();
-private Capture<Map<String, Object>> capturedProducerProps = EasyMock.newCapture();
-private Capture<Map<String, Object>> capturedConsumerProps = EasyMock.newCapture();
-private Capture<Callback<ConsumerRecord<byte[], byte[]>>> capturedConsumedCallback = EasyMock.newCapture();
-
-@Before
-public void setUp() throws Exception {
-store = 
PowerMock.createPartialMockAndInvokeDefaultConstructor(KafkaOffsetBackingStore.class,
 new String[]{"createKafkaBasedLog"});
-}
-
-@Test(expected = CopycatException.class)
-public void testMissingTopic() {
-

[06/26] kafka git commit: KAFKA-2774: Rename Copycat to Kafka Connect

2015-11-08 Thread gwenshap
http://git-wip-us.apache.org/repos/asf/kafka/blob/f2031d40/copycat/runtime/src/main/java/org/apache/kafka/copycat/runtime/rest/entities/CreateConnectorRequest.java
--
diff --git 
a/copycat/runtime/src/main/java/org/apache/kafka/copycat/runtime/rest/entities/CreateConnectorRequest.java
 
b/copycat/runtime/src/main/java/org/apache/kafka/copycat/runtime/rest/entities/CreateConnectorRequest.java
deleted file mode 100644
index 02ff08b..000
--- 
a/copycat/runtime/src/main/java/org/apache/kafka/copycat/runtime/rest/entities/CreateConnectorRequest.java
+++ /dev/null
@@ -1,59 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- **/
-
-package org.apache.kafka.copycat.runtime.rest.entities;
-
-import com.fasterxml.jackson.annotation.JsonCreator;
-import com.fasterxml.jackson.annotation.JsonProperty;
-
-import java.util.Map;
-import java.util.Objects;
-
-public class CreateConnectorRequest {
-private final String name;
-private final Map<String, String> config;
-
-@JsonCreator
-public CreateConnectorRequest(@JsonProperty("name") String name, 
@JsonProperty("config") Map config) {
-this.name = name;
-this.config = config;
-}
-
-@JsonProperty
-public String name() {
-return name;
-}
-
-@JsonProperty
-public Map<String, String> config() {
-return config;
-}
-
-@Override
-public boolean equals(Object o) {
-if (this == o) return true;
-if (o == null || getClass() != o.getClass()) return false;
-CreateConnectorRequest that = (CreateConnectorRequest) o;
-return Objects.equals(name, that.name) &&
-Objects.equals(config, that.config);
-}
-
-@Override
-public int hashCode() {
-return Objects.hash(name, config);
-}
-}

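For context, this entity is the JSON body of a connector-creation REST request. A minimal editorial sketch of serializing one with Jackson follows (assuming the entity class is on the classpath; the connector name and config keys below are hypothetical, not values this patch defines). The copycat package matches the file being removed in this diff; after the rename the same class lives under org.apache.kafka.connect.runtime.rest.entities.

    import com.fasterxml.jackson.databind.ObjectMapper;
    import org.apache.kafka.copycat.runtime.rest.entities.CreateConnectorRequest;

    import java.util.HashMap;
    import java.util.Map;

    public class CreateConnectorRequestSketch {
        public static void main(String[] args) throws Exception {
            Map<String, String> config = new HashMap<>();
            config.put("connector.class", "FileStreamSource");
            config.put("topic", "test");
            config.put("file", "/tmp/input.txt");

            // Prints {"name":"local-file-source","config":{...}} thanks to the
            // @JsonCreator/@JsonProperty annotations on the entity.
            System.out.println(new ObjectMapper().writeValueAsString(
                    new CreateConnectorRequest("local-file-source", config)));
        }
    }
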
http://git-wip-us.apache.org/repos/asf/kafka/blob/f2031d40/copycat/runtime/src/main/java/org/apache/kafka/copycat/runtime/rest/entities/ErrorMessage.java
--
diff --git 
a/copycat/runtime/src/main/java/org/apache/kafka/copycat/runtime/rest/entities/ErrorMessage.java
 
b/copycat/runtime/src/main/java/org/apache/kafka/copycat/runtime/rest/entities/ErrorMessage.java
deleted file mode 100644
index 6cbc140..000
--- 
a/copycat/runtime/src/main/java/org/apache/kafka/copycat/runtime/rest/entities/ErrorMessage.java
+++ /dev/null
@@ -1,63 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- **/
-
-package org.apache.kafka.copycat.runtime.rest.entities;
-
-import com.fasterxml.jackson.annotation.JsonCreator;
-import com.fasterxml.jackson.annotation.JsonProperty;
-
-import java.util.Objects;
-
-/**
- * Standard error format for all REST API failures. These are generated 
automatically by
- * {@link org.apache.kafka.copycat.runtime.rest.errors.CopycatExceptionMapper} 
in response to uncaught
- * {@link org.apache.kafka.copycat.errors.CopycatException}s.
- */
-public class ErrorMessage {
-private final int errorCode;
-private final String message;
-
-@JsonCreator
-public ErrorMessage(@JsonProperty("error_code") int errorCode, 
@JsonProperty("message") String message) {
-this.errorCode = errorCode;
-this.message = message;
-}
-
-@JsonProperty("error_code")
-public int errorCode() {
-return errorCode;
-}
-
-@JsonProperty
-public String message() 

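Given the @JsonProperty annotations above, the wire format pairs a snake_case error_code with a human-readable message. A brief editorial sketch, under the same classpath assumption as the previous example (the 409 code and message text are hypothetical values):

    import com.fasterxml.jackson.databind.ObjectMapper;
    import org.apache.kafka.copycat.runtime.rest.entities.ErrorMessage;

    public class ErrorMessageSketch {
        public static void main(String[] args) throws Exception {
            // Prints {"error_code":409,"message":"Connector already exists"}
            System.out.println(new ObjectMapper().writeValueAsString(
                    new ErrorMessage(409, "Connector already exists")));
        }
    }
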
[03/26] kafka git commit: KAFKA-2774: Rename Copycat to Kafka Connect

2015-11-08 Thread gwenshap
http://git-wip-us.apache.org/repos/asf/kafka/blob/f2031d40/copycat/runtime/src/test/java/org/apache/kafka/copycat/runtime/distributed/WorkerCoordinatorTest.java
--
diff --git 
a/copycat/runtime/src/test/java/org/apache/kafka/copycat/runtime/distributed/WorkerCoordinatorTest.java
 
b/copycat/runtime/src/test/java/org/apache/kafka/copycat/runtime/distributed/WorkerCoordinatorTest.java
deleted file mode 100644
index ac9df44..000
--- 
a/copycat/runtime/src/test/java/org/apache/kafka/copycat/runtime/distributed/WorkerCoordinatorTest.java
+++ /dev/null
@@ -1,443 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- **/
-package org.apache.kafka.copycat.runtime.distributed;
-
-import org.apache.kafka.clients.ClientRequest;
-import org.apache.kafka.clients.Metadata;
-import org.apache.kafka.clients.MockClient;
-import org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient;
-import org.apache.kafka.common.Cluster;
-import org.apache.kafka.common.Node;
-import org.apache.kafka.common.metrics.Metrics;
-import org.apache.kafka.common.protocol.Errors;
-import org.apache.kafka.common.protocol.types.Struct;
-import org.apache.kafka.common.requests.GroupCoordinatorResponse;
-import org.apache.kafka.common.requests.JoinGroupResponse;
-import org.apache.kafka.common.requests.SyncGroupRequest;
-import org.apache.kafka.common.requests.SyncGroupResponse;
-import org.apache.kafka.common.utils.MockTime;
-import org.apache.kafka.copycat.storage.KafkaConfigStorage;
-import org.apache.kafka.copycat.util.ConnectorTaskId;
-import org.apache.kafka.test.TestUtils;
-import org.easymock.EasyMock;
-import org.easymock.Mock;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
-import org.powermock.api.easymock.PowerMock;
-import org.powermock.reflect.Whitebox;
-
-import java.nio.ByteBuffer;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.LinkedHashMap;
-import java.util.List;
-import java.util.Map;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-
-public class WorkerCoordinatorTest {
-
-private static final String LEADER_URL = "leaderUrl:8083";
-private static final String MEMBER_URL = "memberUrl:8083";
-
-private String connectorId = "connector";
-private String connectorId2 = "connector2";
-private ConnectorTaskId taskId0 = new ConnectorTaskId(connectorId, 0);
-private ConnectorTaskId taskId1 = new ConnectorTaskId(connectorId, 1);
-private ConnectorTaskId taskId2 = new ConnectorTaskId(connectorId2, 0);
-
-private String groupId = "test-group";
-private int sessionTimeoutMs = 10;
-private int heartbeatIntervalMs = 2;
-private long retryBackoffMs = 100;
-private long requestTimeoutMs = 5000;
-private MockTime time;
-private MockClient client;
-private Cluster cluster = TestUtils.singletonCluster("topic", 1);
-private Node node = cluster.nodes().get(0);
-private Metadata metadata;
-private Metrics metrics;
-private Map<String, String> metricTags = new LinkedHashMap<>();
-private ConsumerNetworkClient consumerClient;
-private MockRebalanceListener rebalanceListener;
-@Mock private KafkaConfigStorage configStorage;
-private WorkerCoordinator coordinator;
-
-private ClusterConfigState configState1;
-private ClusterConfigState configState2;
-
-@Before
-public void setup() {
-this.time = new MockTime();
-this.client = new MockClient(time);
-this.metadata = new Metadata(0, Long.MAX_VALUE);
-this.metadata.update(cluster, time.milliseconds());
-this.consumerClient = new ConsumerNetworkClient(client, metadata, 
time, 100);
-this.metrics = new Metrics(time);
-this.rebalanceListener = new MockRebalanceListener();
-this.configStorage = PowerMock.createMock(KafkaConfigStorage.class);
-
-client.setNode(node);
-
-this.coordinator = new WorkerCoordinator(consumerClient,
-groupId,
-sessionTimeoutMs,
-heartbeatIntervalMs,
-

[25/26] kafka git commit: KAFKA-2774: Rename Copycat to Kafka Connect

2015-11-08 Thread gwenshap
http://git-wip-us.apache.org/repos/asf/kafka/blob/417e283d/connect/api/src/main/java/org/apache/kafka/connect/data/Date.java
--
diff --git a/connect/api/src/main/java/org/apache/kafka/connect/data/Date.java 
b/connect/api/src/main/java/org/apache/kafka/connect/data/Date.java
new file mode 100644
index 000..842da66
--- /dev/null
+++ b/connect/api/src/main/java/org/apache/kafka/connect/data/Date.java
@@ -0,0 +1,76 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ **/
+
+package org.apache.kafka.connect.data;
+
+import org.apache.kafka.connect.errors.DataException;
+
+import java.util.Calendar;
+import java.util.TimeZone;
+
+/**
+ * 
+ * A date representing a calendar day with no time of day or timezone. The 
corresponding Java type is a java.util.Date
+ * with hours, minutes, seconds, milliseconds set to 0. The underlying 
representation is an integer representing the
+ * number of standardized days (based on a number of milliseconds with 24 
hours/day, 60 minutes/hour, 60 seconds/minute,
+ * 1000 milliseconds/second) since Unix epoch.
+ * 
+ */
+public class Date {
+public static final String LOGICAL_NAME = 
"org.apache.kafka.connect.data.Date";
+
+private static final long MILLIS_PER_DAY = 24 * 60 * 60 * 1000;
+
+private static final TimeZone UTC = TimeZone.getTimeZone("UTC");
+
+/**
+ * Returns a SchemaBuilder for a Date. By returning a SchemaBuilder you 
can override additional schema settings such
+ * as required/optional, default value, and documentation.
+ * @return a SchemaBuilder
+ */
+public static SchemaBuilder builder() {
+return SchemaBuilder.int32()
+.name(LOGICAL_NAME)
+.version(1);
+}
+
+public static final Schema SCHEMA = builder().schema();
+
+/**
+ * Convert a value from its logical format (Date) to its encoded format.
+ * @param value the logical value
+ * @return the encoded value
+ */
+public static int fromLogical(Schema schema, java.util.Date value) {
+if (schema.name() == null || !(schema.name().equals(LOGICAL_NAME)))
+throw new DataException("Requested conversion of Date object but 
the schema does not match.");
+Calendar calendar = Calendar.getInstance(UTC);
+calendar.setTime(value);
+if (calendar.get(Calendar.HOUR_OF_DAY) != 0 || 
calendar.get(Calendar.MINUTE) != 0 ||
+calendar.get(Calendar.SECOND) != 0 || 
calendar.get(Calendar.MILLISECOND) != 0) {
+throw new DataException("Kafka Connect Date type should not have 
any time fields set to non-zero values.");
+}
+long unixMillis = calendar.getTimeInMillis();
+return (int) (unixMillis / MILLIS_PER_DAY);
+}
+
+public static java.util.Date toLogical(Schema schema, int value) {
+if (schema.name() == null || !(schema.name().equals(LOGICAL_NAME)))
+throw new DataException("Requested conversion of Date object but 
the schema does not match.");
+return new java.util.Date(value * MILLIS_PER_DAY);
+}
+}

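A quick worked example of the encoding described above: the integer is a count of standardized 86,400,000 ms days since the epoch, so a round trip through this class looks as follows. This is an editorial sketch, not code from the patch; 16742 is an arbitrary example value.

    import org.apache.kafka.connect.data.Date;
    import org.apache.kafka.connect.data.Schema;

    public class DateLogicalTypeSketch {
        public static void main(String[] args) {
            Schema schema = Date.SCHEMA;

            // 16742 days * 86,400,000 ms/day after 1970-01-01, i.e. midnight UTC.
            java.util.Date day = Date.toLogical(schema, 16742);
            System.out.println(day);

            // Converts back to 16742; fromLogical throws DataException if any
            // time-of-day field of the value is non-zero.
            System.out.println(Date.fromLogical(schema, day));
        }
    }
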
http://git-wip-us.apache.org/repos/asf/kafka/blob/417e283d/connect/api/src/main/java/org/apache/kafka/connect/data/Decimal.java
--
diff --git 
a/connect/api/src/main/java/org/apache/kafka/connect/data/Decimal.java 
b/connect/api/src/main/java/org/apache/kafka/connect/data/Decimal.java
new file mode 100644
index 000..e15f698
--- /dev/null
+++ b/connect/api/src/main/java/org/apache/kafka/connect/data/Decimal.java
@@ -0,0 +1,87 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * 

[06/26] kafka git commit: KAFKA-2774: Rename Copycat to Kafka Connect

2015-11-08 Thread gwenshap
http://git-wip-us.apache.org/repos/asf/kafka/blob/417e283d/copycat/runtime/src/main/java/org/apache/kafka/copycat/runtime/rest/entities/CreateConnectorRequest.java
--
diff --git 
a/copycat/runtime/src/main/java/org/apache/kafka/copycat/runtime/rest/entities/CreateConnectorRequest.java
 
b/copycat/runtime/src/main/java/org/apache/kafka/copycat/runtime/rest/entities/CreateConnectorRequest.java
deleted file mode 100644
index 02ff08b..000
--- 
a/copycat/runtime/src/main/java/org/apache/kafka/copycat/runtime/rest/entities/CreateConnectorRequest.java
+++ /dev/null
@@ -1,59 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- **/
-
-package org.apache.kafka.copycat.runtime.rest.entities;
-
-import com.fasterxml.jackson.annotation.JsonCreator;
-import com.fasterxml.jackson.annotation.JsonProperty;
-
-import java.util.Map;
-import java.util.Objects;
-
-public class CreateConnectorRequest {
-private final String name;
-private final Map<String, String> config;
-
-@JsonCreator
-public CreateConnectorRequest(@JsonProperty("name") String name, 
@JsonProperty("config") Map config) {
-this.name = name;
-this.config = config;
-}
-
-@JsonProperty
-public String name() {
-return name;
-}
-
-@JsonProperty
-public Map<String, String> config() {
-return config;
-}
-
-@Override
-public boolean equals(Object o) {
-if (this == o) return true;
-if (o == null || getClass() != o.getClass()) return false;
-CreateConnectorRequest that = (CreateConnectorRequest) o;
-return Objects.equals(name, that.name) &&
-Objects.equals(config, that.config);
-}
-
-@Override
-public int hashCode() {
-return Objects.hash(name, config);
-}
-}

http://git-wip-us.apache.org/repos/asf/kafka/blob/417e283d/copycat/runtime/src/main/java/org/apache/kafka/copycat/runtime/rest/entities/ErrorMessage.java
--
diff --git 
a/copycat/runtime/src/main/java/org/apache/kafka/copycat/runtime/rest/entities/ErrorMessage.java
 
b/copycat/runtime/src/main/java/org/apache/kafka/copycat/runtime/rest/entities/ErrorMessage.java
deleted file mode 100644
index 6cbc140..000
--- 
a/copycat/runtime/src/main/java/org/apache/kafka/copycat/runtime/rest/entities/ErrorMessage.java
+++ /dev/null
@@ -1,63 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- **/
-
-package org.apache.kafka.copycat.runtime.rest.entities;
-
-import com.fasterxml.jackson.annotation.JsonCreator;
-import com.fasterxml.jackson.annotation.JsonProperty;
-
-import java.util.Objects;
-
-/**
- * Standard error format for all REST API failures. These are generated 
automatically by
- * {@link org.apache.kafka.copycat.runtime.rest.errors.CopycatExceptionMapper} 
in response to uncaught
- * {@link org.apache.kafka.copycat.errors.CopycatException}s.
- */
-public class ErrorMessage {
-private final int errorCode;
-private final String message;
-
-@JsonCreator
-public ErrorMessage(@JsonProperty("error_code") int errorCode, 
@JsonProperty("message") String message) {
-this.errorCode = errorCode;
-this.message = message;
-}
-
-@JsonProperty("error_code")
-public int errorCode() {
-return errorCode;
-}
-
-@JsonProperty
-public String message() 

[01/26] kafka git commit: KAFKA-2774: Rename Copycat to Kafka Connect

2015-11-08 Thread gwenshap
Repository: kafka
Updated Branches:
  refs/heads/0.9.0 48013222f -> 417e283d6


http://git-wip-us.apache.org/repos/asf/kafka/blob/417e283d/tests/kafkatest/tests/connect_distributed_test.py
--
diff --git a/tests/kafkatest/tests/connect_distributed_test.py 
b/tests/kafkatest/tests/connect_distributed_test.py
new file mode 100644
index 000..55901c2
--- /dev/null
+++ b/tests/kafkatest/tests/connect_distributed_test.py
@@ -0,0 +1,97 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from kafkatest.tests.kafka_test import KafkaTest
+from kafkatest.services.connect import ConnectDistributedService
+from ducktape.utils.util import wait_until
+import hashlib, subprocess, json, itertools
+
+class ConnectDistributedFileTest(KafkaTest):
+"""
+Simple test of Kafka Connect in distributed mode, producing data from 
files on one cluster and consuming it on
+another, validating the total output is identical to the input.
+"""
+
+INPUT_FILE = "/mnt/connect.input"
+OUTPUT_FILE = "/mnt/connect.output"
+
+TOPIC = "test"
+OFFSETS_TOPIC = "connect-offsets"
+CONFIG_TOPIC = "connect-configs"
+
+# Since tasks can be assigned to any node and we're testing with files, we 
need to make sure the content is the same
+# across all nodes.
+FIRST_INPUT_LIST = ["foo", "bar", "baz"]
+FIRST_INPUTS = "\n".join(FIRST_INPUT_LIST) + "\n"
+SECOND_INPUT_LIST = ["razz", "ma", "tazz"]
+SECOND_INPUTS = "\n".join(SECOND_INPUT_LIST) + "\n"
+
+SCHEMA = { "type": "string", "optional": False }
+
+def __init__(self, test_context):
+super(ConnectDistributedFileTest, self).__init__(test_context, 
num_zk=1, num_brokers=1, topics={
+'test' : { 'partitions': 1, 'replication-factor': 1 }
+})
+
+self.cc = ConnectDistributedService(test_context, 2, self.kafka, 
[self.INPUT_FILE, self.OUTPUT_FILE])
+
+def test_file_source_and_sink(self, 
converter="org.apache.kafka.connect.json.JsonConverter", schemas=True):
+assert converter != None, "converter type must be set"
+# Template parameters
+self.key_converter = converter
+self.value_converter = converter
+self.schemas = schemas
+
+self.cc.set_configs(lambda node: 
self.render("connect-distributed.properties", node=node))
+
+self.cc.start()
+
+self.logger.info("Creating connectors")
+for connector_props in [self.render("connect-file-source.properties"), 
self.render("connect-file-sink.properties")]:
+connector_config = dict([line.strip().split('=', 1) for line in 
connector_props.split('\n') if line.strip() and not 
line.strip().startswith('#')])
+self.cc.create_connector(connector_config)
+
+# Generating data on the source node should generate new records and 
create new output on the sink node. Timeouts
+# here need to be more generous than they are for standalone mode 
because a) it takes longer to write configs,
+# do rebalancing of the group, etc., and b) without explicit leave 
group support, rebalancing takes a while
+for node in self.cc.nodes:
+node.account.ssh("echo -e -n " + repr(self.FIRST_INPUTS) + " >> " 
+ self.INPUT_FILE)
+wait_until(lambda: self.validate_output(self.FIRST_INPUT_LIST), 
timeout_sec=120, err_msg="Data added to input file was not seen in the output 
file in a reasonable amount of time.")
+
+# Restarting both should result in them picking up where they left off,
+# only processing new data.
+self.cc.restart()
+
+for node in self.cc.nodes:
+node.account.ssh("echo -e -n " + repr(self.SECOND_INPUTS) + " >> " 
+ self.INPUT_FILE)
+wait_until(lambda: self.validate_output(self.FIRST_INPUT_LIST + 
self.SECOND_INPUT_LIST), timeout_sec=120, err_msg="Sink output file never 
converged to the same state as the input file")
+
+def validate_output(self, input):
+input_set = set(input)
+# Output needs to be collected from all nodes because we can't be sure 
where the tasks will be scheduled.
+# Between the first and second rounds, we might even end up with half 
the data on 

[26/26] kafka git commit: KAFKA-2774: Rename Copycat to Kafka Connect

2015-11-08 Thread gwenshap
KAFKA-2774: Rename Copycat to Kafka Connect

Author: Ewen Cheslack-Postava 

Reviewers: Gwen Shapira

Closes #456 from ewencp/kafka-2774-rename-copycat

(cherry picked from commit f2031d40639ef34c1591c22971394ef41c87652c)
Signed-off-by: Gwen Shapira 


Project: http://git-wip-us.apache.org/repos/asf/kafka/repo
Commit: http://git-wip-us.apache.org/repos/asf/kafka/commit/417e283d
Tree: http://git-wip-us.apache.org/repos/asf/kafka/tree/417e283d
Diff: http://git-wip-us.apache.org/repos/asf/kafka/diff/417e283d

Branch: refs/heads/0.9.0
Commit: 417e283d643d8865aa3e79dffa373c8cc853d78f
Parents: 4801322
Author: Ewen Cheslack-Postava 
Authored: Sun Nov 8 22:11:03 2015 -0800
Committer: Gwen Shapira 
Committed: Sun Nov 8 22:11:26 2015 -0800

--
 bin/connect-distributed.sh  |  23 +
 bin/connect-standalone.sh   |  23 +
 bin/copycat-distributed.sh  |  23 -
 bin/copycat-standalone.sh   |  23 -
 bin/kafka-run-class.sh  |   6 +-
 build.gradle|  48 +-
 checkstyle/import-control.xml   |  30 +-
 .../consumer/internals/AbstractCoordinator.java |   2 +-
 config/connect-console-sink.properties  |  19 +
 config/connect-console-source.properties|  19 +
 config/connect-distributed.properties   |  42 +
 config/connect-file-sink.properties |  20 +
 config/connect-file-source.properties   |  20 +
 config/connect-log4j.properties |  23 +
 config/connect-standalone.properties|  37 +
 config/copycat-console-sink.properties  |  19 -
 config/copycat-console-source.properties|  19 -
 config/copycat-distributed.properties   |  42 -
 config/copycat-file-sink.properties |  20 -
 config/copycat-file-source.properties   |  20 -
 config/copycat-log4j.properties |  23 -
 config/copycat-standalone.properties|  37 -
 .../kafka/connect/connector/ConnectRecord.java  | 122 +++
 .../kafka/connect/connector/Connector.java  | 124 +++
 .../connect/connector/ConnectorContext.java |  33 +
 .../apache/kafka/connect/connector/Task.java|  56 ++
 .../kafka/connect/data/ConnectSchema.java   | 323 +++
 .../org/apache/kafka/connect/data/Date.java |  76 ++
 .../org/apache/kafka/connect/data/Decimal.java  |  87 ++
 .../org/apache/kafka/connect/data/Field.java|  77 ++
 .../org/apache/kafka/connect/data/Schema.java   | 163 
 .../kafka/connect/data/SchemaAndValue.java  |  62 ++
 .../kafka/connect/data/SchemaBuilder.java   | 412 +
 .../kafka/connect/data/SchemaProjector.java | 197 
 .../org/apache/kafka/connect/data/Struct.java   | 265 ++
 .../org/apache/kafka/connect/data/Time.java |  77 ++
 .../apache/kafka/connect/data/Timestamp.java|  64 ++
 .../kafka/connect/errors/ConnectException.java  |  40 +
 .../kafka/connect/errors/DataException.java |  35 +
 .../errors/IllegalWorkerStateException.java |  35 +
 .../connect/errors/SchemaBuilderException.java  |  32 +
 .../errors/SchemaProjectorException.java|  29 +
 .../kafka/connect/sink/SinkConnector.java   |  40 +
 .../apache/kafka/connect/sink/SinkRecord.java   |  72 ++
 .../org/apache/kafka/connect/sink/SinkTask.java | 107 +++
 .../kafka/connect/sink/SinkTaskContext.java |  82 ++
 .../kafka/connect/source/SourceConnector.java   |  29 +
 .../kafka/connect/source/SourceRecord.java  | 109 +++
 .../apache/kafka/connect/source/SourceTask.java |  82 ++
 .../kafka/connect/source/SourceTaskContext.java |  32 +
 .../apache/kafka/connect/storage/Converter.java |  57 ++
 .../connect/storage/OffsetStorageReader.java|  65 ++
 .../kafka/connect/storage/StringConverter.java  |  81 ++
 .../kafka/connect/util/ConnectorUtils.java  |  66 ++
 .../connector/ConnectorReconfigurationTest.java |  82 ++
 .../kafka/connect/data/ConnectSchemaTest.java   | 303 ++
 .../org/apache/kafka/connect/data/DateTest.java |  78 ++
 .../apache/kafka/connect/data/DecimalTest.java  |  63 ++
 .../apache/kafka/connect/data/FieldTest.java|  40 +
 .../kafka/connect/data/SchemaBuilderTest.java   | 305 ++
 .../kafka/connect/data/SchemaProjectorTest.java | 495 ++
 .../apache/kafka/connect/data/StructTest.java   | 222 +
 .../org/apache/kafka/connect/data/TimeTest.java |  80 ++
 .../kafka/connect/data/TimestampTest.java   |  75 ++
 .../connect/storage/StringConverterTest.java|  83 ++
 .../kafka/connect/util/ConnectorUtilsTest.java  |  67 ++
 .../connect/file/FileStreamSinkConnector.java   |  69 ++
 .../kafka/connect/file/FileStreamSinkTask.java  |  94 ++
 .../connect/file/FileStreamSourceConnector.java |  77 ++
 .../connect/file/FileStreamSourceTask.java  | 216 +
 

[21/26] kafka git commit: KAFKA-2774: Rename Copycat to Kafka Connect

2015-11-08 Thread gwenshap
http://git-wip-us.apache.org/repos/asf/kafka/blob/f2031d40/connect/runtime/src/main/java/org/apache/kafka/connect/errors/AlreadyExistsException.java
--
diff --git 
a/connect/runtime/src/main/java/org/apache/kafka/connect/errors/AlreadyExistsException.java
 
b/connect/runtime/src/main/java/org/apache/kafka/connect/errors/AlreadyExistsException.java
new file mode 100644
index 000..6fdefdf
--- /dev/null
+++ 
b/connect/runtime/src/main/java/org/apache/kafka/connect/errors/AlreadyExistsException.java
@@ -0,0 +1,35 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ **/
+
+package org.apache.kafka.connect.errors;
+
+/**
+ * Indicates the operation tried to create an entity that already exists.
+ */
+public class AlreadyExistsException extends ConnectException {
+public AlreadyExistsException(String s) {
+super(s);
+}
+
+public AlreadyExistsException(String s, Throwable throwable) {
+super(s, throwable);
+}
+
+public AlreadyExistsException(Throwable throwable) {
+super(throwable);
+}
+}

http://git-wip-us.apache.org/repos/asf/kafka/blob/f2031d40/connect/runtime/src/main/java/org/apache/kafka/connect/errors/NotFoundException.java
--
diff --git 
a/connect/runtime/src/main/java/org/apache/kafka/connect/errors/NotFoundException.java
 
b/connect/runtime/src/main/java/org/apache/kafka/connect/errors/NotFoundException.java
new file mode 100644
index 000..a3bbe91
--- /dev/null
+++ 
b/connect/runtime/src/main/java/org/apache/kafka/connect/errors/NotFoundException.java
@@ -0,0 +1,35 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ **/
+
+package org.apache.kafka.connect.errors;
+
+/**
+ * Indicates that an operation attempted to modify or delete a connector or 
task that is not present on the worker.
+ */
+public class NotFoundException extends ConnectException {
+public NotFoundException(String s) {
+super(s);
+}
+
+public NotFoundException(String s, Throwable throwable) {
+super(s, throwable);
+}
+
+public NotFoundException(Throwable throwable) {
+super(throwable);
+}
+}

http://git-wip-us.apache.org/repos/asf/kafka/blob/f2031d40/connect/runtime/src/main/java/org/apache/kafka/connect/errors/RetriableException.java
--
diff --git 
a/connect/runtime/src/main/java/org/apache/kafka/connect/errors/RetriableException.java
 
b/connect/runtime/src/main/java/org/apache/kafka/connect/errors/RetriableException.java
new file mode 100644
index 000..1b5b07a
--- /dev/null
+++ 
b/connect/runtime/src/main/java/org/apache/kafka/connect/errors/RetriableException.java
@@ -0,0 +1,35 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR 

[07/26] kafka git commit: KAFKA-2774: Rename Copycat to Kafka Connect

2015-11-08 Thread gwenshap
http://git-wip-us.apache.org/repos/asf/kafka/blob/417e283d/copycat/runtime/src/main/java/org/apache/kafka/copycat/runtime/distributed/DistributedHerder.java
--
diff --git 
a/copycat/runtime/src/main/java/org/apache/kafka/copycat/runtime/distributed/DistributedHerder.java
 
b/copycat/runtime/src/main/java/org/apache/kafka/copycat/runtime/distributed/DistributedHerder.java
deleted file mode 100644
index 96de1ca..000
--- 
a/copycat/runtime/src/main/java/org/apache/kafka/copycat/runtime/distributed/DistributedHerder.java
+++ /dev/null
@@ -1,920 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- **/
-
-package org.apache.kafka.copycat.runtime.distributed;
-
-import org.apache.kafka.common.errors.WakeupException;
-import org.apache.kafka.common.config.ConfigException;
-import org.apache.kafka.common.utils.SystemTime;
-import org.apache.kafka.common.utils.Time;
-import org.apache.kafka.common.utils.Utils;
-import org.apache.kafka.copycat.connector.ConnectorContext;
-import org.apache.kafka.copycat.errors.AlreadyExistsException;
-import org.apache.kafka.copycat.errors.CopycatException;
-import org.apache.kafka.copycat.errors.NotFoundException;
-import org.apache.kafka.copycat.runtime.ConnectorConfig;
-import org.apache.kafka.copycat.runtime.Herder;
-import org.apache.kafka.copycat.runtime.HerderConnectorContext;
-import org.apache.kafka.copycat.runtime.TaskConfig;
-import org.apache.kafka.copycat.runtime.Worker;
-import org.apache.kafka.copycat.runtime.rest.RestServer;
-import org.apache.kafka.copycat.runtime.rest.entities.ConnectorInfo;
-import org.apache.kafka.copycat.runtime.rest.entities.TaskInfo;
-import org.apache.kafka.copycat.sink.SinkConnector;
-import org.apache.kafka.copycat.storage.KafkaConfigStorage;
-import org.apache.kafka.copycat.util.Callback;
-import org.apache.kafka.copycat.util.ConnectorTaskId;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.PriorityQueue;
-import java.util.Queue;
-import java.util.Set;
-import java.util.concurrent.Callable;
-import java.util.concurrent.CountDownLatch;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.TimeoutException;
-import java.util.concurrent.atomic.AtomicBoolean;
-
-/**
- * 
- * Distributed "herder" that coordinates with other workers to spread work 
across multiple processes.
- * 
- * 
- * Under the hood, this is implemented as a group managed by Kafka's group 
membership facilities (i.e. the generalized
- * group/consumer coordinator). Each instance of DistributedHerder joins 
the group and indicates what its current
- * configuration state is (where it is in the configuration log). The 
group coordinator selects one member to take
- * this information and assign each instance a subset of the active 
connectors & tasks to execute. This assignment
- * is currently performed in a simple round-robin fashion, but this is not 
guaranteed -- the herder may also choose
- * to, e.g., use a sticky assignment to avoid the usual start/stop costs 
associated with connectors and tasks. Once
- * an assignment is received, the DistributedHerder simply runs its 
assigned connectors and tasks in a Worker.
- * 
- * 
- * In addition to distributing work, the DistributedHerder uses the leader 
determined during the work assignment
- * to select a leader for this generation of the group who is responsible 
for other tasks that can only be performed
- * by a single node at a time. Most importantly, this includes writing 
updated configurations for connectors and tasks,
- * (and therefore, also for creating, destroying, and scaling up/down 
connectors).
- * 
- */
-public class DistributedHerder implements Herder, Runnable {
-private static final Logger log = 
LoggerFactory.getLogger(DistributedHerder.class);
-
-private static 

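
The javadoc above describes round-robin distribution of connectors and tasks over the group members. A self-contained sketch of that assignment shape, using plain strings for members and work units; the real herder assigns ConnectorTaskIds via the group coordinator, and the javadoc is explicit that round-robin is not guaranteed:

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

// Illustrative only: distribute work units over members round-robin,
// in the spirit of the strategy the javadoc above describes.
public class RoundRobinSketch {
    public static Map<String, List<String>> assign(List<String> members, List<String> work) {
        Map<String, List<String>> assignment = new HashMap<>();
        if (members.isEmpty())
            return assignment;
        for (String member : members)
            assignment.put(member, new ArrayList<String>());
        for (int i = 0; i < work.size(); i++)
            // the i-th work unit goes to the (i mod N)-th member
            assignment.get(members.get(i % members.size())).add(work.get(i));
        return assignment;
    }
}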
[20/26] kafka git commit: KAFKA-2774: Rename Copycat to Kafka Connect

2015-11-08 Thread gwenshap
http://git-wip-us.apache.org/repos/asf/kafka/blob/f2031d40/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/WorkerSourceTask.java
--
diff --git 
a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/WorkerSourceTask.java
 
b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/WorkerSourceTask.java
new file mode 100644
index 000..141e430
--- /dev/null
+++ 
b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/WorkerSourceTask.java
@@ -0,0 +1,339 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ **/
+
+package org.apache.kafka.connect.runtime;
+
+import org.apache.kafka.common.utils.Time;
+import org.apache.kafka.clients.producer.Callback;
+import org.apache.kafka.clients.producer.KafkaProducer;
+import org.apache.kafka.clients.producer.ProducerRecord;
+import org.apache.kafka.clients.producer.RecordMetadata;
+import org.apache.kafka.connect.source.SourceRecord;
+import org.apache.kafka.connect.source.SourceTask;
+import org.apache.kafka.connect.storage.Converter;
+import org.apache.kafka.connect.storage.OffsetStorageReader;
+import org.apache.kafka.connect.storage.OffsetStorageWriter;
+import org.apache.kafka.connect.util.ConnectorTaskId;
+import org.apache.kafka.connect.util.ShutdownableThread;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.IdentityHashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.Future;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
+
+/**
+ * WorkerTask that uses a SourceTask to ingest data into Kafka.
+ */
+class WorkerSourceTask implements WorkerTask {
+private static final Logger log = 
LoggerFactory.getLogger(WorkerSourceTask.class);
+
+private final ConnectorTaskId id;
+private final SourceTask task;
+private final Converter keyConverter;
+private final Converter valueConverter;
+private KafkaProducer<byte[], byte[]> producer;
+private WorkerSourceTaskThread workThread;
+private final OffsetStorageReader offsetReader;
+private final OffsetStorageWriter offsetWriter;
+private final WorkerConfig workerConfig;
+private final Time time;
+
+// Use IdentityHashMap to ensure correctness with duplicate records. This 
is a HashMap because
+// there is no IdentityHashSet.
+private IdentityHashMap<ProducerRecord<byte[], byte[]>, ProducerRecord<byte[], byte[]>> outstandingMessages;
+// A second buffer is used while an offset flush is running
+private IdentityHashMap<ProducerRecord<byte[], byte[]>, ProducerRecord<byte[], byte[]>> outstandingMessagesBacklog;
+private boolean flushing;
+
+public WorkerSourceTask(ConnectorTaskId id, SourceTask task,
+Converter keyConverter, Converter valueConverter,
+KafkaProducer<byte[], byte[]> producer,
+OffsetStorageReader offsetReader, 
OffsetStorageWriter offsetWriter,
+WorkerConfig workerConfig, Time time) {
+this.id = id;
+this.task = task;
+this.keyConverter = keyConverter;
+this.valueConverter = valueConverter;
+this.producer = producer;
+this.offsetReader = offsetReader;
+this.offsetWriter = offsetWriter;
+this.workerConfig = workerConfig;
+this.time = time;
+
+this.outstandingMessages = new IdentityHashMap<>();
+this.outstandingMessagesBacklog = new IdentityHashMap<>();
+this.flushing = false;
+}
+
+@Override
+public void start(Map<String, String> props) {
+workThread = new WorkerSourceTaskThread("WorkerSourceTask-" + id, 
props);
+workThread.start();
+}
+
+@Override
+public void stop() {
+if (workThread != null)
+workThread.startGracefulShutdown();
+}
+
+@Override
+public boolean awaitStop(long timeoutMs) {
+boolean success = true;
+if (workThread != null) {
+try {
+success = workThread.awaitShutdown(timeoutMs, 
TimeUnit.MILLISECONDS);
+   

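
The pair of IdentityHashMap fields in the diff above implements a double-buffering scheme: sends that complete while an offset flush is in progress land in the backlog map and are promoted once the flush finishes. A condensed sketch of that idea, with plain strings standing in for ProducerRecords (illustrative only, not the real task's code):

import java.util.IdentityHashMap;

// Illustrative double buffer: while a flush is running, new in-flight
// records go to the backlog so the flush snapshot stays stable.
public class OutstandingTrackerSketch {
    private IdentityHashMap<String, String> outstanding = new IdentityHashMap<>();
    private IdentityHashMap<String, String> backlog = new IdentityHashMap<>();
    private boolean flushing = false;

    public synchronized void recordSent(String record) {
        if (flushing)
            backlog.put(record, record);   // flush in progress: don't disturb the snapshot
        else
            outstanding.put(record, record);
    }

    public synchronized void beginFlush() {
        flushing = true;                   // freeze 'outstanding' as the flush snapshot
    }

    public synchronized void finishFlush() {
        outstanding = backlog;             // promote records that arrived during the flush
        backlog = new IdentityHashMap<>();
        flushing = false;
    }
}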
[13/26] kafka git commit: KAFKA-2774: Rename Copycat to Kafka Connect

2015-11-08 Thread gwenshap
http://git-wip-us.apache.org/repos/asf/kafka/blob/417e283d/copycat/api/src/main/java/org/apache/kafka/copycat/data/CopycatSchema.java
--
diff --git 
a/copycat/api/src/main/java/org/apache/kafka/copycat/data/CopycatSchema.java 
b/copycat/api/src/main/java/org/apache/kafka/copycat/data/CopycatSchema.java
deleted file mode 100644
index 104abf1..000
--- a/copycat/api/src/main/java/org/apache/kafka/copycat/data/CopycatSchema.java
+++ /dev/null
@@ -1,323 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- **/
-
-package org.apache.kafka.copycat.data;
-
-import org.apache.kafka.copycat.errors.DataException;
-
-import java.math.BigDecimal;
-import java.nio.ByteBuffer;
-import java.util.*;
-
-public class CopycatSchema implements Schema {
-/**
- * Maps Schema.Types to a list of Java classes that can be used to 
represent them.
- */
-private static final Map<Type, List<Class>> SCHEMA_TYPE_CLASSES = new HashMap<>();
-/**
- * Maps known logical types to a list of Java classes that can be used to 
represent them.
- */
-private static final Map<String, List<Class>> LOGICAL_TYPE_CLASSES = new HashMap<>();
-
-/**
- * Maps the Java classes to the corresponding Schema.Type.
- */
-private static final Map<Class<?>, Type> JAVA_CLASS_SCHEMA_TYPES = new HashMap<>();
-
-static {
-SCHEMA_TYPE_CLASSES.put(Type.INT8, Arrays.asList((Class) Byte.class));
-SCHEMA_TYPE_CLASSES.put(Type.INT16, Arrays.asList((Class) 
Short.class));
-SCHEMA_TYPE_CLASSES.put(Type.INT32, Arrays.asList((Class) 
Integer.class));
-SCHEMA_TYPE_CLASSES.put(Type.INT64, Arrays.asList((Class) Long.class));
-SCHEMA_TYPE_CLASSES.put(Type.FLOAT32, Arrays.asList((Class) 
Float.class));
-SCHEMA_TYPE_CLASSES.put(Type.FLOAT64, Arrays.asList((Class) 
Double.class));
-SCHEMA_TYPE_CLASSES.put(Type.BOOLEAN, Arrays.asList((Class) 
Boolean.class));
-SCHEMA_TYPE_CLASSES.put(Type.STRING, Arrays.asList((Class) 
String.class));
-// Bytes are special and have 2 representations. byte[] causes 
problems because it doesn't handle equals() and
-// hashCode() like we want objects to, so we support both byte[] and 
ByteBuffer. Using plain byte[] can cause
-// those methods to fail, so ByteBuffers are recommended
-SCHEMA_TYPE_CLASSES.put(Type.BYTES, Arrays.asList((Class) 
byte[].class, (Class) ByteBuffer.class));
-SCHEMA_TYPE_CLASSES.put(Type.ARRAY, Arrays.asList((Class) List.class));
-SCHEMA_TYPE_CLASSES.put(Type.MAP, Arrays.asList((Class) Map.class));
-SCHEMA_TYPE_CLASSES.put(Type.STRUCT, Arrays.asList((Class) 
Struct.class));
-
-for (Map.Entry<Type, List<Class>> schemaClasses : SCHEMA_TYPE_CLASSES.entrySet()) {
-for (Class schemaClass : schemaClasses.getValue())
-JAVA_CLASS_SCHEMA_TYPES.put(schemaClass, 
schemaClasses.getKey());
-}
-
-LOGICAL_TYPE_CLASSES.put(Decimal.LOGICAL_NAME, Arrays.asList((Class) 
BigDecimal.class));
-LOGICAL_TYPE_CLASSES.put(Date.LOGICAL_NAME, Arrays.asList((Class) 
java.util.Date.class));
-LOGICAL_TYPE_CLASSES.put(Time.LOGICAL_NAME, Arrays.asList((Class) 
java.util.Date.class));
-LOGICAL_TYPE_CLASSES.put(Timestamp.LOGICAL_NAME, Arrays.asList((Class) 
java.util.Date.class));
-// We don't need to put these into JAVA_CLASS_SCHEMA_TYPES since 
that's only used to determine schemas for
-// schemaless data and logical types will have ambiguous schemas (e.g. 
many of them use the same Java class) so
-// they should not be used without schemas.
-}
-
-// The type of the field
-private final Type type;
-private final boolean optional;
-private final Object defaultValue;
-
-private final List<Field> fields;
-private final Map<String, Field> fieldsByName;
-
-private final Schema keySchema;
-private final Schema valueSchema;
-
-// Optional name and version provide a built-in way to indicate what type 
of data is included. Most
-// useful for structs to indicate the semantics of the struct and map it 
to some existing underlying
-// serializer-specific schema. 

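
The static maps in the deleted class above drive validation in both directions: schema type to permitted Java classes, and Java class back to schema type for schemaless data. A small sketch of the reverse lookup, with a hypothetical string-valued table standing in for JAVA_CLASS_SCHEMA_TYPES (names and contents are illustrative):

import java.util.HashMap;
import java.util.Map;

// Illustrative lookup from Java classes to schema type names, mirroring
// the shape of JAVA_CLASS_SCHEMA_TYPES in the diff above.
public class TypeLookupSketch {
    private static final Map<Class<?>, String> JAVA_CLASS_TO_TYPE = new HashMap<>();
    static {
        JAVA_CLASS_TO_TYPE.put(Byte.class, "INT8");
        JAVA_CLASS_TO_TYPE.put(Integer.class, "INT32");
        JAVA_CLASS_TO_TYPE.put(String.class, "STRING");
        // byte[] and ByteBuffer would both map to BYTES, per the comment in the diff
    }

    // Returns null for unknown classes, matching the diff's caveat that
    // logical types share Java classes and must carry explicit schemas.
    public static String schemaTypeFor(Object value) {
        return value == null ? null : JAVA_CLASS_TO_TYPE.get(value.getClass());
    }
}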
[25/26] kafka git commit: KAFKA-2774: Rename Copycat to Kafka Connect

2015-11-08 Thread gwenshap
http://git-wip-us.apache.org/repos/asf/kafka/blob/f2031d40/connect/api/src/main/java/org/apache/kafka/connect/data/Date.java
--
diff --git a/connect/api/src/main/java/org/apache/kafka/connect/data/Date.java 
b/connect/api/src/main/java/org/apache/kafka/connect/data/Date.java
new file mode 100644
index 000..842da66
--- /dev/null
+++ b/connect/api/src/main/java/org/apache/kafka/connect/data/Date.java
@@ -0,0 +1,76 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ **/
+
+package org.apache.kafka.connect.data;
+
+import org.apache.kafka.connect.errors.DataException;
+
+import java.util.Calendar;
+import java.util.TimeZone;
+
+/**
+ * 
+ * A date representing a calendar day with no time of day or timezone. The 
corresponding Java type is a java.util.Date
+ * with hours, minutes, seconds, milliseconds set to 0. The underlying 
representation is an integer representing the
+ * number of standardized days (based on a number of milliseconds with 24 
hours/day, 60 minutes/hour, 60 seconds/minute,
+ * 1000 milliseconds/second with no leap seconds) since Unix epoch.
+ * 
+ */
+public class Date {
+public static final String LOGICAL_NAME = 
"org.apache.kafka.connect.data.Date";
+
+private static final long MILLIS_PER_DAY = 24 * 60 * 60 * 1000;
+
+private static final TimeZone UTC = TimeZone.getTimeZone("UTC");
+
+/**
+ * Returns a SchemaBuilder for a Date. By returning a SchemaBuilder you 
can override additional schema settings such
+ * as required/optional, default value, and documentation.
+ * @return a SchemaBuilder
+ */
+public static SchemaBuilder builder() {
+return SchemaBuilder.int32()
+.name(LOGICAL_NAME)
+.version(1);
+}
+
+public static final Schema SCHEMA = builder().schema();
+
+/**
+ * Convert a value from its logical format (Date) to its encoded format.
+ * @param value the logical value
+ * @return the encoded value
+ */
+public static int fromLogical(Schema schema, java.util.Date value) {
+if (schema.name() == null || !(schema.name().equals(LOGICAL_NAME)))
+throw new DataException("Requested conversion of Date object but 
the schema does not match.");
+Calendar calendar = Calendar.getInstance(UTC);
+calendar.setTime(value);
+if (calendar.get(Calendar.HOUR_OF_DAY) != 0 || 
calendar.get(Calendar.MINUTE) != 0 ||
+calendar.get(Calendar.SECOND) != 0 || 
calendar.get(Calendar.MILLISECOND) != 0) {
+throw new DataException("Kafka Connect Date type should not have 
any time fields set to non-zero values.");
+}
+long unixMillis = calendar.getTimeInMillis();
+return (int) (unixMillis / MILLIS_PER_DAY);
+}
+
+public static java.util.Date toLogical(Schema schema, int value) {
+if (schema.name() == null || !(schema.name().equals(LOGICAL_NAME)))
+throw new DataException("Requested conversion of Date object but 
the schema does not match.");
+return new java.util.Date(value * MILLIS_PER_DAY);
+}
+}
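
A round trip through the logical Date type added above, using only the SCHEMA constant and the fromLogical/toLogical methods shown in this diff:

import org.apache.kafka.connect.data.Date;
import org.apache.kafka.connect.data.Schema;

import java.util.Calendar;
import java.util.TimeZone;

public class DateRoundTrip {
    public static void main(String[] args) {
        Calendar calendar = Calendar.getInstance(TimeZone.getTimeZone("UTC"));
        calendar.clear();
        calendar.set(2015, Calendar.NOVEMBER, 8);   // midnight UTC, time-of-day fields zeroed

        Schema schema = Date.SCHEMA;
        int days = Date.fromLogical(schema, calendar.getTime());
        java.util.Date back = Date.toLogical(schema, days);
        System.out.println(days + " days since epoch -> " + back);
    }
}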

http://git-wip-us.apache.org/repos/asf/kafka/blob/f2031d40/connect/api/src/main/java/org/apache/kafka/connect/data/Decimal.java
--
diff --git 
a/connect/api/src/main/java/org/apache/kafka/connect/data/Decimal.java 
b/connect/api/src/main/java/org/apache/kafka/connect/data/Decimal.java
new file mode 100644
index 000..e15f698
--- /dev/null
+++ b/connect/api/src/main/java/org/apache/kafka/connect/data/Decimal.java
@@ -0,0 +1,87 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * 

[18/26] kafka git commit: KAFKA-2774: Rename Copycat to Kafka Connect

2015-11-08 Thread gwenshap
http://git-wip-us.apache.org/repos/asf/kafka/blob/f2031d40/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/entities/CreateConnectorRequest.java
--
diff --git 
a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/entities/CreateConnectorRequest.java
 
b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/entities/CreateConnectorRequest.java
new file mode 100644
index 000..c8e0f6f
--- /dev/null
+++ 
b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/entities/CreateConnectorRequest.java
@@ -0,0 +1,59 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ **/
+
+package org.apache.kafka.connect.runtime.rest.entities;
+
+import com.fasterxml.jackson.annotation.JsonCreator;
+import com.fasterxml.jackson.annotation.JsonProperty;
+
+import java.util.Map;
+import java.util.Objects;
+
+public class CreateConnectorRequest {
+private final String name;
+private final Map<String, String> config;
+
+@JsonCreator
+public CreateConnectorRequest(@JsonProperty("name") String name, 
@JsonProperty("config") Map config) {
+this.name = name;
+this.config = config;
+}
+
+@JsonProperty
+public String name() {
+return name;
+}
+
+@JsonProperty
+public Map<String, String> config() {
+return config;
+}
+
+@Override
+public boolean equals(Object o) {
+if (this == o) return true;
+if (o == null || getClass() != o.getClass()) return false;
+CreateConnectorRequest that = (CreateConnectorRequest) o;
+return Objects.equals(name, that.name) &&
+Objects.equals(config, that.config);
+}
+
+@Override
+public int hashCode() {
+return Objects.hash(name, config);
+}
+}
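
Because the entity above is a plain Jackson POJO, the REST payload it models maps directly to JSON. A usage sketch, assuming Jackson databind on the classpath; the connector name and config keys below are made up for illustration:

import com.fasterxml.jackson.databind.ObjectMapper;
import org.apache.kafka.connect.runtime.rest.entities.CreateConnectorRequest;

public class CreateConnectorRequestJson {
    public static void main(String[] args) throws Exception {
        ObjectMapper mapper = new ObjectMapper();
        String json = "{\"name\": \"local-file-sink\", "
                + "\"config\": {\"connector.class\": \"FileStreamSinkConnector\", \"topics\": \"test\"}}";
        // Deserialization goes through the @JsonCreator constructor shown above
        CreateConnectorRequest req = mapper.readValue(json, CreateConnectorRequest.class);
        System.out.println(req.name() + " -> " + req.config());
    }
}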

http://git-wip-us.apache.org/repos/asf/kafka/blob/f2031d40/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/entities/ErrorMessage.java
--
diff --git 
a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/entities/ErrorMessage.java
 
b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/entities/ErrorMessage.java
new file mode 100644
index 000..493b00d
--- /dev/null
+++ 
b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/entities/ErrorMessage.java
@@ -0,0 +1,63 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ **/
+
+package org.apache.kafka.connect.runtime.rest.entities;
+
+import com.fasterxml.jackson.annotation.JsonCreator;
+import com.fasterxml.jackson.annotation.JsonProperty;
+
+import java.util.Objects;
+
+/**
+ * Standard error format for all REST API failures. These are generated 
automatically by
+ * {@link ConnectExceptionMapper} in response to uncaught
+ * {@link ConnectException}s.
+ */
+public class ErrorMessage {
+private final int errorCode;
+private final String message;
+
+@JsonCreator
+public ErrorMessage(@JsonProperty("error_code") int errorCode, 
@JsonProperty("message") String message) {
+this.errorCode = errorCode;
+this.message = message;
+}
+
+@JsonProperty("error_code")
+public int errorCode() {
+return errorCode;
+}
+
+@JsonProperty
+public String message() {
+return message;
+}
+
+@Override
+public boolean equals(Object 

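
The ConnectExceptionMapper referenced in the javadoc above is not part of this excerpt. A minimal sketch of the mapping it describes, under the assumption of a standard JAX-RS ExceptionMapper; the real mapper's status-code selection and registration may differ:

import org.apache.kafka.connect.errors.ConnectException;
import org.apache.kafka.connect.runtime.rest.entities.ErrorMessage;

import javax.ws.rs.core.Response;
import javax.ws.rs.ext.ExceptionMapper;

// Sketch only: turns an uncaught ConnectException into the standard
// ErrorMessage JSON body shown in the diff above. Registration with the
// REST server (e.g. an @Provider annotation) is omitted.
public class ConnectExceptionMapperSketch implements ExceptionMapper<ConnectException> {
    @Override
    public Response toResponse(ConnectException e) {
        ErrorMessage body = new ErrorMessage(500, e.getMessage());
        return Response.status(500).entity(body).build();
    }
}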
[16/26] kafka git commit: KAFKA-2774: Rename Copycat to Kafka Connect

2015-11-08 Thread gwenshap
http://git-wip-us.apache.org/repos/asf/kafka/blob/417e283d/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/WorkerSinkTaskThreadedTest.java
--
diff --git 
a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/WorkerSinkTaskThreadedTest.java
 
b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/WorkerSinkTaskThreadedTest.java
new file mode 100644
index 000..6915631
--- /dev/null
+++ 
b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/WorkerSinkTaskThreadedTest.java
@@ -0,0 +1,563 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ **/
+
+package org.apache.kafka.connect.runtime;
+
+import org.apache.kafka.clients.consumer.ConsumerRebalanceListener;
+import org.apache.kafka.clients.consumer.ConsumerRecord;
+import org.apache.kafka.clients.consumer.ConsumerRecords;
+import org.apache.kafka.clients.consumer.KafkaConsumer;
+import org.apache.kafka.clients.consumer.OffsetAndMetadata;
+import org.apache.kafka.clients.consumer.OffsetCommitCallback;
+import org.apache.kafka.common.TopicPartition;
+import org.apache.kafka.common.utils.Time;
+import org.apache.kafka.connect.data.Schema;
+import org.apache.kafka.connect.data.SchemaAndValue;
+import org.apache.kafka.connect.errors.ConnectException;
+import org.apache.kafka.connect.runtime.standalone.StandaloneConfig;
+import org.apache.kafka.connect.sink.SinkConnector;
+import org.apache.kafka.connect.sink.SinkRecord;
+import org.apache.kafka.connect.sink.SinkTask;
+import org.apache.kafka.connect.storage.Converter;
+import org.apache.kafka.connect.util.ConnectorTaskId;
+import org.apache.kafka.connect.util.MockTime;
+import org.apache.kafka.connect.util.ThreadedTest;
+import org.easymock.Capture;
+import org.easymock.CaptureType;
+import org.easymock.EasyMock;
+import org.easymock.IAnswer;
+import org.easymock.IExpectationSetters;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.powermock.api.easymock.PowerMock;
+import org.powermock.api.easymock.annotation.Mock;
+import org.powermock.core.classloader.annotations.PowerMockIgnore;
+import org.powermock.core.classloader.annotations.PrepareForTest;
+import org.powermock.modules.junit4.PowerMockRunner;
+import org.powermock.reflect.Whitebox;
+
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.concurrent.TimeUnit;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.fail;
+
+@RunWith(PowerMockRunner.class)
+@PrepareForTest(WorkerSinkTask.class)
+@PowerMockIgnore("javax.management.*")
+public class WorkerSinkTaskThreadedTest extends ThreadedTest {
+
+// These are fixed to keep this code simpler. In this example we assume 
byte[] raw values
+// with a mix of integer/string in Connect
+private static final String TOPIC = "test";
+private static final int PARTITION = 12;
+private static final int PARTITION2 = 13;
+private static final int PARTITION3 = 14;
+private static final long FIRST_OFFSET = 45;
+private static final Schema KEY_SCHEMA = Schema.INT32_SCHEMA;
+private static final int KEY = 12;
+private static final Schema VALUE_SCHEMA = Schema.STRING_SCHEMA;
+private static final String VALUE = "VALUE";
+private static final byte[] RAW_KEY = "key".getBytes();
+private static final byte[] RAW_VALUE = "value".getBytes();
+
+private static final TopicPartition TOPIC_PARTITION = new 
TopicPartition(TOPIC, PARTITION);
+private static final TopicPartition TOPIC_PARTITION2 = new 
TopicPartition(TOPIC, PARTITION2);
+private static final TopicPartition TOPIC_PARTITION3 = new 
TopicPartition(TOPIC, PARTITION3);
+private static final TopicPartition UNASSIGNED_TOPIC_PARTITION = new 
TopicPartition(TOPIC, 200);
+
+private static final Map TASK_PROPS = new HashMap<>();
+static {
+TASK_PROPS.put(SinkConnector.TOPICS_CONFIG, TOPIC);
+}
+
+private ConnectorTaskId taskId = new ConnectorTaskId("job", 0);
+private Time time;
+@Mock private SinkTask sinkTask;
+private Capture 

[23/26] kafka git commit: KAFKA-2774: Rename Copycat to Kafka Connect

2015-11-08 Thread gwenshap
http://git-wip-us.apache.org/repos/asf/kafka/blob/417e283d/connect/api/src/test/java/org/apache/kafka/connect/data/SchemaProjectorTest.java
--
diff --git 
a/connect/api/src/test/java/org/apache/kafka/connect/data/SchemaProjectorTest.java
 
b/connect/api/src/test/java/org/apache/kafka/connect/data/SchemaProjectorTest.java
new file mode 100644
index 000..0b1760b
--- /dev/null
+++ 
b/connect/api/src/test/java/org/apache/kafka/connect/data/SchemaProjectorTest.java
@@ -0,0 +1,495 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more 
contributor license
+ * agreements.  See the NOTICE file distributed with this work for additional 
information regarding
+ * copyright ownership. The ASF licenses this file to You under the Apache 
License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the 
License.  You may obtain a
+ * copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software 
distributed under the License
+ * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY 
KIND, either express
+ * or implied. See the License for the specific language governing permissions 
and limitations under
+ * the License.
+ **/
+
+package org.apache.kafka.connect.data;
+
+import org.apache.kafka.connect.data.Schema.Type;
+import org.apache.kafka.connect.errors.DataException;
+import org.apache.kafka.connect.errors.SchemaProjectorException;
+import org.junit.Test;
+
+import java.math.BigDecimal;
+import java.math.BigInteger;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.fail;
+
+public class SchemaProjectorTest {
+
+@Test
+public void testPrimitiveTypeProjection() throws Exception {
+Object projected;
+projected = SchemaProjector.project(Schema.BOOLEAN_SCHEMA, false, 
Schema.BOOLEAN_SCHEMA);
+assertEquals(false, projected);
+
+byte[] bytes = {(byte) 1, (byte) 2};
+projected  = SchemaProjector.project(Schema.BYTES_SCHEMA, bytes, 
Schema.BYTES_SCHEMA);
+assertEquals(bytes, projected);
+
+projected = SchemaProjector.project(Schema.STRING_SCHEMA, "abc", 
Schema.STRING_SCHEMA);
+assertEquals("abc", projected);
+
+projected = SchemaProjector.project(Schema.BOOLEAN_SCHEMA, false, 
Schema.OPTIONAL_BOOLEAN_SCHEMA);
+assertEquals(false, projected);
+
+projected  = SchemaProjector.project(Schema.BYTES_SCHEMA, bytes, 
Schema.OPTIONAL_BYTES_SCHEMA);
+assertEquals(bytes, projected);
+
+projected = SchemaProjector.project(Schema.STRING_SCHEMA, "abc", 
Schema.OPTIONAL_STRING_SCHEMA);
+assertEquals("abc", projected);
+
+try {
+SchemaProjector.project(Schema.OPTIONAL_BOOLEAN_SCHEMA, false, 
Schema.BOOLEAN_SCHEMA);
+fail("Cannot project optional schema to schema with no default 
value.");
+} catch (DataException e) {
+// expected
+}
+
+try {
+SchemaProjector.project(Schema.OPTIONAL_BYTES_SCHEMA, bytes, 
Schema.BYTES_SCHEMA);
+fail("Cannot project optional schema to schema with no default 
value.");
+} catch (DataException e) {
+// expected
+}
+
+try {
+SchemaProjector.project(Schema.OPTIONAL_STRING_SCHEMA, "abc", 
Schema.STRING_SCHEMA);
+fail("Cannot project optional schema to schema with no default 
value.");
+} catch (DataException e) {
+// expected
+}
+}
+
+@Test
+public void testNumericTypeProjection() throws Exception {
+Schema[] promotableSchemas = {Schema.INT8_SCHEMA, Schema.INT16_SCHEMA, 
Schema.INT32_SCHEMA, Schema.INT64_SCHEMA, Schema.FLOAT32_SCHEMA, 
Schema.FLOAT64_SCHEMA};
+Schema[] promotableOptionalSchemas = {Schema.OPTIONAL_INT8_SCHEMA, 
Schema.OPTIONAL_INT16_SCHEMA, Schema.OPTIONAL_INT32_SCHEMA, 
Schema.OPTIONAL_INT64_SCHEMA,
+  Schema.OPTIONAL_FLOAT32_SCHEMA, 
Schema.OPTIONAL_FLOAT64_SCHEMA};
+
+Object[] values = {(byte) 127, (short) 255, 32767, 327890L, 1.2F, 
1.2345};
+Map expectedProjected = new HashMap<>();
+expectedProjected.put(values[0], Arrays.asList((byte) 127, (short) 
127, 127, 127L, 127.F, 127.));
+expectedProjected.put(values[1], Arrays.asList((short) 255, 255, 255L, 
255.F, 255.));
+expectedProjected.put(values[2], Arrays.asList(32767, 32767L, 32767.F, 
32767.));
+expectedProjected.put(values[3], Arrays.asList(327890L, 327890.F, 
327890.));
+expectedProjected.put(values[4], Arrays.asList(1.2F, 1.2));
+expectedProjected.put(values[5], Arrays.asList(1.2345));
+
+

[09/26] kafka git commit: KAFKA-2774: Rename Copycat to Kafka Connect

2015-11-08 Thread gwenshap
http://git-wip-us.apache.org/repos/asf/kafka/blob/417e283d/copycat/runtime/src/main/java/org/apache/kafka/copycat/errors/AlreadyExistsException.java
--
diff --git 
a/copycat/runtime/src/main/java/org/apache/kafka/copycat/errors/AlreadyExistsException.java
 
b/copycat/runtime/src/main/java/org/apache/kafka/copycat/errors/AlreadyExistsException.java
deleted file mode 100644
index b09cb53..000
--- 
a/copycat/runtime/src/main/java/org/apache/kafka/copycat/errors/AlreadyExistsException.java
+++ /dev/null
@@ -1,35 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- **/
-
-package org.apache.kafka.copycat.errors;
-
-/**
- * Indicates the operation tried to create an entity that already exists.
- */
-public class AlreadyExistsException extends CopycatException {
-public AlreadyExistsException(String s) {
-super(s);
-}
-
-public AlreadyExistsException(String s, Throwable throwable) {
-super(s, throwable);
-}
-
-public AlreadyExistsException(Throwable throwable) {
-super(throwable);
-}
-}

http://git-wip-us.apache.org/repos/asf/kafka/blob/417e283d/copycat/runtime/src/main/java/org/apache/kafka/copycat/errors/NotFoundException.java
--
diff --git 
a/copycat/runtime/src/main/java/org/apache/kafka/copycat/errors/NotFoundException.java
 
b/copycat/runtime/src/main/java/org/apache/kafka/copycat/errors/NotFoundException.java
deleted file mode 100644
index a8e13a9..000
--- 
a/copycat/runtime/src/main/java/org/apache/kafka/copycat/errors/NotFoundException.java
+++ /dev/null
@@ -1,35 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- **/
-
-package org.apache.kafka.copycat.errors;
-
-/**
- * Indicates that an operation attempted to modify or delete a connector or 
task that is not present on the worker.
- */
-public class NotFoundException extends CopycatException {
-public NotFoundException(String s) {
-super(s);
-}
-
-public NotFoundException(String s, Throwable throwable) {
-super(s, throwable);
-}
-
-public NotFoundException(Throwable throwable) {
-super(throwable);
-}
-}

http://git-wip-us.apache.org/repos/asf/kafka/blob/417e283d/copycat/runtime/src/main/java/org/apache/kafka/copycat/errors/RetriableException.java
--
diff --git 
a/copycat/runtime/src/main/java/org/apache/kafka/copycat/errors/RetriableException.java
 
b/copycat/runtime/src/main/java/org/apache/kafka/copycat/errors/RetriableException.java
deleted file mode 100644
index 75821aa..000
--- 
a/copycat/runtime/src/main/java/org/apache/kafka/copycat/errors/RetriableException.java
+++ /dev/null
@@ -1,35 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT 

[04/26] kafka git commit: KAFKA-2774: Rename Copycat to Kafka Connect

2015-11-08 Thread gwenshap
http://git-wip-us.apache.org/repos/asf/kafka/blob/f2031d40/copycat/runtime/src/test/java/org/apache/kafka/copycat/runtime/WorkerSinkTaskThreadedTest.java
--
diff --git 
a/copycat/runtime/src/test/java/org/apache/kafka/copycat/runtime/WorkerSinkTaskThreadedTest.java
 
b/copycat/runtime/src/test/java/org/apache/kafka/copycat/runtime/WorkerSinkTaskThreadedTest.java
deleted file mode 100644
index ded78a1..000
--- 
a/copycat/runtime/src/test/java/org/apache/kafka/copycat/runtime/WorkerSinkTaskThreadedTest.java
+++ /dev/null
@@ -1,563 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- * 
- * http://www.apache.org/licenses/LICENSE-2.0
- * 
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- **/
-
-package org.apache.kafka.copycat.runtime;
-
-import org.apache.kafka.clients.consumer.ConsumerRebalanceListener;
-import org.apache.kafka.clients.consumer.ConsumerRecord;
-import org.apache.kafka.clients.consumer.ConsumerRecords;
-import org.apache.kafka.clients.consumer.KafkaConsumer;
-import org.apache.kafka.clients.consumer.OffsetAndMetadata;
-import org.apache.kafka.clients.consumer.OffsetCommitCallback;
-import org.apache.kafka.common.TopicPartition;
-import org.apache.kafka.common.utils.Time;
-import org.apache.kafka.copycat.data.Schema;
-import org.apache.kafka.copycat.data.SchemaAndValue;
-import org.apache.kafka.copycat.errors.CopycatException;
-import org.apache.kafka.copycat.runtime.standalone.StandaloneConfig;
-import org.apache.kafka.copycat.sink.SinkConnector;
-import org.apache.kafka.copycat.sink.SinkRecord;
-import org.apache.kafka.copycat.sink.SinkTask;
-import org.apache.kafka.copycat.storage.Converter;
-import org.apache.kafka.copycat.util.ConnectorTaskId;
-import org.apache.kafka.copycat.util.MockTime;
-import org.apache.kafka.copycat.util.ThreadedTest;
-import org.easymock.Capture;
-import org.easymock.CaptureType;
-import org.easymock.EasyMock;
-import org.easymock.IAnswer;
-import org.easymock.IExpectationSetters;
-import org.junit.Test;
-import org.junit.runner.RunWith;
-import org.powermock.api.easymock.PowerMock;
-import org.powermock.api.easymock.annotation.Mock;
-import org.powermock.core.classloader.annotations.PowerMockIgnore;
-import org.powermock.core.classloader.annotations.PrepareForTest;
-import org.powermock.modules.junit4.PowerMockRunner;
-import org.powermock.reflect.Whitebox;
-
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Map;
-import java.util.concurrent.TimeUnit;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.fail;
-
-@RunWith(PowerMockRunner.class)
-@PrepareForTest(WorkerSinkTask.class)
-@PowerMockIgnore("javax.management.*")
-public class WorkerSinkTaskThreadedTest extends ThreadedTest {
-
-// These are fixed to keep this code simpler. In this example we assume 
byte[] raw values
-// with a mix of integer/string in Copycat
-private static final String TOPIC = "test";
-private static final int PARTITION = 12;
-private static final int PARTITION2 = 13;
-private static final int PARTITION3 = 14;
-private static final long FIRST_OFFSET = 45;
-private static final Schema KEY_SCHEMA = Schema.INT32_SCHEMA;
-private static final int KEY = 12;
-private static final Schema VALUE_SCHEMA = Schema.STRING_SCHEMA;
-private static final String VALUE = "VALUE";
-private static final byte[] RAW_KEY = "key".getBytes();
-private static final byte[] RAW_VALUE = "value".getBytes();
-
-private static final TopicPartition TOPIC_PARTITION = new 
TopicPartition(TOPIC, PARTITION);
-private static final TopicPartition TOPIC_PARTITION2 = new 
TopicPartition(TOPIC, PARTITION2);
-private static final TopicPartition TOPIC_PARTITION3 = new 
TopicPartition(TOPIC, PARTITION3);
-private static final TopicPartition UNASSIGNED_TOPIC_PARTITION = new 
TopicPartition(TOPIC, 200);
-
-private static final Map TASK_PROPS = new HashMap<>();
-static {
-TASK_PROPS.put(SinkConnector.TOPICS_CONFIG, TOPIC);
-}
-
-private ConnectorTaskId taskId = new ConnectorTaskId("job", 0);
-private Time time;
-@Mock private SinkTask sinkTask;
-private 

[13/26] kafka git commit: KAFKA-2774: Rename Copycat to Kafka Connect

2015-11-08 Thread gwenshap
http://git-wip-us.apache.org/repos/asf/kafka/blob/f2031d40/copycat/api/src/main/java/org/apache/kafka/copycat/data/CopycatSchema.java
--
diff --git 
a/copycat/api/src/main/java/org/apache/kafka/copycat/data/CopycatSchema.java 
b/copycat/api/src/main/java/org/apache/kafka/copycat/data/CopycatSchema.java
deleted file mode 100644
index 104abf1..000
--- a/copycat/api/src/main/java/org/apache/kafka/copycat/data/CopycatSchema.java
+++ /dev/null
@@ -1,323 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- **/
-
-package org.apache.kafka.copycat.data;
-
-import org.apache.kafka.copycat.errors.DataException;
-
-import java.math.BigDecimal;
-import java.nio.ByteBuffer;
-import java.util.*;
-
-public class CopycatSchema implements Schema {
-/**
- * Maps Schema.Types to a list of Java classes that can be used to 
represent them.
- */
-private static final Map<Type, List<Class>> SCHEMA_TYPE_CLASSES = new HashMap<>();
-/**
- * Maps known logical types to a list of Java classes that can be used to 
represent them.
- */
-private static final Map<String, List<Class>> LOGICAL_TYPE_CLASSES = new HashMap<>();
-
-/**
- * Maps the Java classes to the corresponding Schema.Type.
- */
-private static final Map<Class<?>, Type> JAVA_CLASS_SCHEMA_TYPES = new HashMap<>();
-
-static {
-SCHEMA_TYPE_CLASSES.put(Type.INT8, Arrays.asList((Class) Byte.class));
-SCHEMA_TYPE_CLASSES.put(Type.INT16, Arrays.asList((Class) 
Short.class));
-SCHEMA_TYPE_CLASSES.put(Type.INT32, Arrays.asList((Class) 
Integer.class));
-SCHEMA_TYPE_CLASSES.put(Type.INT64, Arrays.asList((Class) Long.class));
-SCHEMA_TYPE_CLASSES.put(Type.FLOAT32, Arrays.asList((Class) 
Float.class));
-SCHEMA_TYPE_CLASSES.put(Type.FLOAT64, Arrays.asList((Class) 
Double.class));
-SCHEMA_TYPE_CLASSES.put(Type.BOOLEAN, Arrays.asList((Class) 
Boolean.class));
-SCHEMA_TYPE_CLASSES.put(Type.STRING, Arrays.asList((Class) 
String.class));
-// Bytes are special and have 2 representations. byte[] causes 
problems because it doesn't handle equals() and
-// hashCode() like we want objects to, so we support both byte[] and 
ByteBuffer. Using plain byte[] can cause
-// those methods to fail, so ByteBuffers are recommended
-SCHEMA_TYPE_CLASSES.put(Type.BYTES, Arrays.asList((Class) 
byte[].class, (Class) ByteBuffer.class));
-SCHEMA_TYPE_CLASSES.put(Type.ARRAY, Arrays.asList((Class) List.class));
-SCHEMA_TYPE_CLASSES.put(Type.MAP, Arrays.asList((Class) Map.class));
-SCHEMA_TYPE_CLASSES.put(Type.STRUCT, Arrays.asList((Class) 
Struct.class));
-
-for (Map.Entry<Type, List<Class>> schemaClasses : SCHEMA_TYPE_CLASSES.entrySet()) {
-for (Class schemaClass : schemaClasses.getValue())
-JAVA_CLASS_SCHEMA_TYPES.put(schemaClass, 
schemaClasses.getKey());
-}
-
-LOGICAL_TYPE_CLASSES.put(Decimal.LOGICAL_NAME, Arrays.asList((Class) 
BigDecimal.class));
-LOGICAL_TYPE_CLASSES.put(Date.LOGICAL_NAME, Arrays.asList((Class) 
java.util.Date.class));
-LOGICAL_TYPE_CLASSES.put(Time.LOGICAL_NAME, Arrays.asList((Class) 
java.util.Date.class));
-LOGICAL_TYPE_CLASSES.put(Timestamp.LOGICAL_NAME, Arrays.asList((Class) 
java.util.Date.class));
-// We don't need to put these into JAVA_CLASS_SCHEMA_TYPES since 
that's only used to determine schemas for
-// schemaless data and logical types will have ambiguous schemas (e.g. 
many of them use the same Java class) so
-// they should not be used without schemas.
-}
-
-// The type of the field
-private final Type type;
-private final boolean optional;
-private final Object defaultValue;
-
-private final List<Field> fields;
-private final Map<String, Field> fieldsByName;
-
-private final Schema keySchema;
-private final Schema valueSchema;
-
-// Optional name and version provide a built-in way to indicate what type 
of data is included. Most
-// useful for structs to indicate the semantics of the struct and map it 
to some existing underlying
-// serializer-specific schema. 

[05/26] kafka git commit: KAFKA-2774: Rename Copycat to Kafka Connect

2015-11-08 Thread gwenshap
http://git-wip-us.apache.org/repos/asf/kafka/blob/f2031d40/copycat/runtime/src/main/java/org/apache/kafka/copycat/storage/KafkaOffsetBackingStore.java
--
diff --git 
a/copycat/runtime/src/main/java/org/apache/kafka/copycat/storage/KafkaOffsetBackingStore.java
 
b/copycat/runtime/src/main/java/org/apache/kafka/copycat/storage/KafkaOffsetBackingStore.java
deleted file mode 100644
index b270368..000
--- 
a/copycat/runtime/src/main/java/org/apache/kafka/copycat/storage/KafkaOffsetBackingStore.java
+++ /dev/null
@@ -1,213 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- **/
-
-package org.apache.kafka.copycat.storage;
-
-import org.apache.kafka.clients.consumer.ConsumerConfig;
-import org.apache.kafka.clients.consumer.ConsumerRecord;
-import org.apache.kafka.clients.producer.ProducerConfig;
-import org.apache.kafka.clients.producer.RecordMetadata;
-import org.apache.kafka.common.utils.SystemTime;
-import org.apache.kafka.copycat.errors.CopycatException;
-import org.apache.kafka.copycat.util.Callback;
-import org.apache.kafka.copycat.util.ConvertingFutureCallback;
-import org.apache.kafka.copycat.util.KafkaBasedLog;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.nio.ByteBuffer;
-import java.util.Collection;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.Future;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.TimeoutException;
-
-/**
- * 
- * Implementation of OffsetBackingStore that uses a Kafka topic to store 
offset data.
- * 
- * 
- * Internally, this implementation both produces to and consumes from a 
Kafka topic which stores the offsets.
- * It accepts producer and consumer overrides via its configuration but 
forces some settings to specific values
- * to ensure correct behavior (e.g. acks, auto.offset.reset).
- * 
- */
-public class KafkaOffsetBackingStore implements OffsetBackingStore {
-private static final Logger log = 
LoggerFactory.getLogger(KafkaOffsetBackingStore.class);
-
-public final static String OFFSET_STORAGE_TOPIC_CONFIG = 
"offset.storage.topic";
-
-private KafkaBasedLog<byte[], byte[]> offsetLog;
-private HashMap<ByteBuffer, ByteBuffer> data;
-
-@Override
-public void configure(Map<String, ?> configs) {
-String topic = (String) configs.get(OFFSET_STORAGE_TOPIC_CONFIG);
-if (topic == null)
-throw new CopycatException("Offset storage topic must be 
specified");
-
-data = new HashMap<>();
-
-Map producerProps = new HashMap<>();
-producerProps.putAll(configs);
-producerProps.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, 
"org.apache.kafka.common.serialization.ByteArraySerializer");
-producerProps.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, 
"org.apache.kafka.common.serialization.ByteArraySerializer");
-producerProps.put(ProducerConfig.ACKS_CONFIG, "all");
-
-Map consumerProps = new HashMap<>();
-consumerProps.putAll(configs);
-consumerProps.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, 
"org.apache.kafka.common.serialization.ByteArrayDeserializer");
-consumerProps.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, 
"org.apache.kafka.common.serialization.ByteArrayDeserializer");
-consumerProps.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, false);
-
-offsetLog = createKafkaBasedLog(topic, producerProps, consumerProps, 
consumedCallback);
-}
-
-@Override
-public void start() {
-log.info("Starting KafkaOffsetBackingStore");
-offsetLog.start();
-log.info("Finished reading offsets topic and starting 
KafkaOffsetBackingStore");
-}
-
-@Override
-public void stop() {
-log.info("Stopping KafkaOffsetBackingStore");
-offsetLog.stop();
-log.info("Stopped KafkaOffsetBackingStore");
-}
-
-@Override
-public Future> get(final 
Collection keys,
-   final 
Callback> 

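
The configure() method in the section above copies the caller's settings and then overwrites the ones the store must control (serializers, acks, auto-commit), as its javadoc promises. A condensed sketch of that override-on-top-of-user-config pattern; the property names are the standard producer config keys, and the helper class is illustrative:

import java.util.HashMap;
import java.util.Map;

// Illustrative: user-supplied settings are copied first, then the settings
// the offset store must control are forced, as in configure() above.
public class ForcedConfigSketch {
    public static Map<String, Object> producerConfig(Map<String, ?> userConfig) {
        Map<String, Object> props = new HashMap<>(userConfig);
        props.put("key.serializer", "org.apache.kafka.common.serialization.ByteArraySerializer");
        props.put("value.serializer", "org.apache.kafka.common.serialization.ByteArraySerializer");
        props.put("acks", "all");   // require full replication before an offset write is acked
        return props;
    }
}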
[08/26] kafka git commit: KAFKA-2774: Rename Copycat to Kafka Connect

2015-11-08 Thread gwenshap
http://git-wip-us.apache.org/repos/asf/kafka/blob/f2031d40/copycat/runtime/src/main/java/org/apache/kafka/copycat/runtime/WorkerSourceTask.java
--
diff --git 
a/copycat/runtime/src/main/java/org/apache/kafka/copycat/runtime/WorkerSourceTask.java
 
b/copycat/runtime/src/main/java/org/apache/kafka/copycat/runtime/WorkerSourceTask.java
deleted file mode 100644
index 6577fe9..000
--- 
a/copycat/runtime/src/main/java/org/apache/kafka/copycat/runtime/WorkerSourceTask.java
+++ /dev/null
@@ -1,339 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- * 
- * http://www.apache.org/licenses/LICENSE-2.0
- * 
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- **/
-
-package org.apache.kafka.copycat.runtime;
-
-import org.apache.kafka.common.utils.Time;
-import org.apache.kafka.clients.producer.Callback;
-import org.apache.kafka.clients.producer.KafkaProducer;
-import org.apache.kafka.clients.producer.ProducerRecord;
-import org.apache.kafka.clients.producer.RecordMetadata;
-import org.apache.kafka.copycat.source.SourceRecord;
-import org.apache.kafka.copycat.source.SourceTask;
-import org.apache.kafka.copycat.storage.Converter;
-import org.apache.kafka.copycat.storage.OffsetStorageReader;
-import org.apache.kafka.copycat.storage.OffsetStorageWriter;
-import org.apache.kafka.copycat.util.ConnectorTaskId;
-import org.apache.kafka.copycat.util.ShutdownableThread;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.util.IdentityHashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.Future;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.TimeoutException;
-
-/**
- * WorkerTask that uses a SourceTask to ingest data into Kafka.
- */
-class WorkerSourceTask implements WorkerTask {
-private static final Logger log = 
LoggerFactory.getLogger(WorkerSourceTask.class);
-
-private final ConnectorTaskId id;
-private final SourceTask task;
-private final Converter keyConverter;
-private final Converter valueConverter;
-private KafkaProducer<byte[], byte[]> producer;
-private WorkerSourceTaskThread workThread;
-private final OffsetStorageReader offsetReader;
-private final OffsetStorageWriter offsetWriter;
-private final WorkerConfig workerConfig;
-private final Time time;
-
-// Use IdentityHashMap to ensure correctness with duplicate records. This 
is a HashMap because
-// there is no IdentityHashSet.
-private IdentityHashMap<ProducerRecord<byte[], byte[]>, ProducerRecord<byte[], byte[]>> outstandingMessages;
-// A second buffer is used while an offset flush is running
-private IdentityHashMap<ProducerRecord<byte[], byte[]>, ProducerRecord<byte[], byte[]>> outstandingMessagesBacklog;
-private boolean flushing;
-
-public WorkerSourceTask(ConnectorTaskId id, SourceTask task,
-Converter keyConverter, Converter valueConverter,
-KafkaProducer<byte[], byte[]> producer,
-OffsetStorageReader offsetReader, 
OffsetStorageWriter offsetWriter,
-WorkerConfig workerConfig, Time time) {
-this.id = id;
-this.task = task;
-this.keyConverter = keyConverter;
-this.valueConverter = valueConverter;
-this.producer = producer;
-this.offsetReader = offsetReader;
-this.offsetWriter = offsetWriter;
-this.workerConfig = workerConfig;
-this.time = time;
-
-this.outstandingMessages = new IdentityHashMap<>();
-this.outstandingMessagesBacklog = new IdentityHashMap<>();
-this.flushing = false;
-}
-
-@Override
-public void start(Map<String, String> props) {
-workThread = new WorkerSourceTaskThread("WorkerSourceTask-" + id, 
props);
-workThread.start();
-}
-
-@Override
-public void stop() {
-if (workThread != null)
-workThread.startGracefulShutdown();
-}
-
-@Override
-public boolean awaitStop(long timeoutMs) {
-boolean success = true;
-if (workThread != null) {
-try {
-success = workThread.awaitShutdown(timeoutMs, 
TimeUnit.MILLISECONDS);
