[ https://issues.apache.org/jira/browse/STORM-405?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=14078482#comment-14078482 ]

ASF GitHub Bot commented on STORM-405:
--------------------------------------

Github user wurstmeister commented on a diff in the pull request:

    https://github.com/apache/incubator-storm/pull/195#discussion_r15556066
  
    --- Diff: external/storm-kafka/src/test/storm/kafka/TridentKafkaTopology.java ---
    @@ -0,0 +1,101 @@
    +/**
    + * Licensed to the Apache Software Foundation (ASF) under one
    + * or more contributor license agreements.  See the NOTICE file
    + * distributed with this work for additional information
    + * regarding copyright ownership.  The ASF licenses this file
    + * to you under the Apache License, Version 2.0 (the
    + * "License"); you may not use this file except in compliance
    + * with the License.  You may obtain a copy of the License at
    + *
    + * http://www.apache.org/licenses/LICENSE-2.0
    + *
    + * Unless required by applicable law or agreed to in writing, software
    + * distributed under the License is distributed on an "AS IS" BASIS,
    + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    + * See the License for the specific language governing permissions and
    + * limitations under the License.
    + */
    +package storm.kafka;
    +
    +import backtype.storm.Config;
    +import backtype.storm.LocalCluster;
    +import backtype.storm.generated.StormTopology;
    +import backtype.storm.tuple.Fields;
    +import backtype.storm.tuple.Values;
    +import storm.kafka.trident.TridentKafkaState;
    +import storm.kafka.trident.TridentKafkaStateFactory;
    +import storm.kafka.trident.TridentKafkaUpdater;
    +import storm.kafka.trident.mapper.FieldNameBasedTupleToKafkaMapper;
    +import storm.kafka.trident.selector.DefaultTopicSelector;
    +import storm.trident.Stream;
    +import storm.trident.TridentTopology;
    +import storm.trident.testing.FixedBatchSpout;
    +
    +import java.util.Properties;
    +
    +
    +public class TridentKafkaTopology {
    +
    +    private static StormTopology buildTopology() {
    +        Fields fields = new Fields("word", "count");
    +        FixedBatchSpout spout = new FixedBatchSpout(fields, 4,
    +                new Values("storm", "1"),
    +                new Values("trident", "1"),
    +                new Values("needs", "1"),
    +                new Values("javadoc", "1")
    +        );
    +        spout.setCycle(true);
    +
    +        TridentTopology topology = new TridentTopology();
    +        Stream stream = topology.newStream("spout1", spout);
    +
    +        TridentKafkaStateFactory stateFactory = new TridentKafkaStateFactory()
    +                .withKafkaTopicSelector(new DefaultTopicSelector("test"))
    +                .withTridentTupleToKafkaMapper(new FieldNameBasedTupleToKafkaMapper("word", "count"));
    +        stream.partitionPersist(stateFactory, fields, new TridentKafkaUpdater(), new Fields());
    +
    +        return topology.build();
    +    }
    +
    +    /**
    +     * To run this topology, ensure you have a Kafka broker running and provide the broker connection string as an argument.
    +     * Create a topic named "test" from the command line:
    +     * kafka-topics.sh --create --zookeeper localhost:2181 --replication-factor 1 --partitions 1 --topic test
    +     *
    +     * Run this program, then run the Kafka console consumer:
    +     * kafka-console-consumer.sh --zookeeper localhost:2181 --whitelist test --from-beginning
    +     *
    +     * You should see the messages flowing through.
    +     *
    +     * @param args args[0] is the Kafka broker connection string, e.g. localhost:9092
    +     * @throws Exception
    +     */
    +    public static void main(String[] args) throws Exception {
    +        if (args.length < 1) {
    +            System.out.println("Please provide the Kafka broker url, e.g. localhost:9092");
    +            return;
    +        }
    +
    +        Config conf = getConfig(args[0]);
    +        LocalCluster cluster = new LocalCluster();
    +        cluster.submitTopology("wordCounter", conf, buildTopology());
    +        Thread.sleep(60 * 1000);
    +        cluster.killTopology("wordCounter");
    +
    +        cluster.shutdown();
    +    }
    +
    +    private static Config getConfig(String brokerConnectionString) {
    +        Config conf = new Config();
    +        Properties props = new Properties();
    +        props.put("metadata.broker.list", brokerConnectionString);
    +        props.put("request.required.acks", "1");
    +        props.put("serializer.class", "kafka.serializer.StringEncoder");
    +        conf.put(TridentKafkaState.KAFKA_BROKER_PROPERTIES, props);
    +        conf.put(TridentKafkaState.TOPIC, "test");
    --- End diff ---
    
    Is this really required? We are using a topic selector in the state factory. (As far as I can see, we don't even need the constant in TridentKafkaState anymore.)
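
A minimal sketch of the alternative this comment points at, assuming TridentKafkaState can take the destination topic solely from the selector wired into TridentKafkaStateFactory rather than from a conf entry (the class and method names below are illustrative only, not part of the PR):

    // Sketch only, not part of the diff above: producer config without the
    // TridentKafkaState.TOPIC entry. The topic is assumed to come from the
    // DefaultTopicSelector("test") passed to the state factory.
    import java.util.Properties;

    import backtype.storm.Config;
    import storm.kafka.trident.TridentKafkaState;

    class ProducerConfigSketch {
        static Config getConfigWithoutTopic(String brokerConnectionString) {
            Config conf = new Config();
            Properties props = new Properties();
            props.put("metadata.broker.list", brokerConnectionString);
            props.put("request.required.acks", "1");
            props.put("serializer.class", "kafka.serializer.StringEncoder");
            conf.put(TridentKafkaState.KAFKA_BROKER_PROPERTIES, props);
            // No conf.put(TridentKafkaState.TOPIC, ...) here; the topic
            // selector on the state factory decides where tuples go.
            return conf;
        }
    }

Whether the TOPIC constant can be removed from TridentKafkaState itself depends on whether any other code path still reads it; the sketch only restates the comment's point for the example topology.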


> Add kafka trident state so messages can be sent to kafka topics
> ---------------------------------------------------------------
>
>                 Key: STORM-405
>                 URL: https://issues.apache.org/jira/browse/STORM-405
>             Project: Apache Storm (Incubating)
>          Issue Type: Improvement
>    Affects Versions: 0.9.3-incubating
>            Reporter: Parth Brahmbhatt
>            Priority: Minor
>             Fix For: 0.9.3-incubating
>
>
> Currently Storm has a bolt for writing to Kafka, but we have no implementation
> of a Trident state. We need a Trident state implementation that allows writing
> tuples directly to Kafka topics as part of a Trident topology.



--
This message was sent by Atlassian JIRA
(v6.2#6252)
