[ https://issues.apache.org/jira/browse/STORM-211?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=14013000#comment-14013000 ]

ASF GitHub Bot commented on STORM-211:
--------------------------------------

Github user revans2 commented on a diff in the pull request:

    https://github.com/apache/incubator-storm/pull/128#discussion_r13207702
  
    --- Diff: external/storm-hdfs/src/main/java/org/apache/storm/hdfs/bolt/HdfsBolt.java ---
    @@ -0,0 +1,129 @@
    +/**
    + * Licensed to the Apache Software Foundation (ASF) under one
    + * or more contributor license agreements.  See the NOTICE file
    + * distributed with this work for additional information
    + * regarding copyright ownership.  The ASF licenses this file
    + * to you under the Apache License, Version 2.0 (the
    + * "License"); you may not use this file except in compliance
    + * with the License.  You may obtain a copy of the License at
    + *
    + * http://www.apache.org/licenses/LICENSE-2.0
    + *
    + * Unless required by applicable law or agreed to in writing, software
    + * distributed under the License is distributed on an "AS IS" BASIS,
    + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    + * See the License for the specific language governing permissions and
    + * limitations under the License.
    + */
    +package org.apache.storm.hdfs.bolt;
    +
    +import backtype.storm.task.OutputCollector;
    +import backtype.storm.task.TopologyContext;
    +import backtype.storm.tuple.Tuple;
    +import org.apache.hadoop.fs.FSDataOutputStream;
    +import org.apache.hadoop.fs.FileSystem;
    +import org.apache.hadoop.fs.Path;
    +import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
    +import org.apache.hadoop.hdfs.client.HdfsDataOutputStream.SyncFlag;
    +import org.apache.storm.hdfs.bolt.format.FileNameFormat;
    +import org.apache.storm.hdfs.bolt.format.RecordFormat;
    +import org.apache.storm.hdfs.bolt.rotation.FileRotationPolicy;
    +import org.apache.storm.hdfs.bolt.sync.SyncPolicy;
    +import org.apache.storm.hdfs.common.rotation.RotationAction;
    +import org.slf4j.Logger;
    +import org.slf4j.LoggerFactory;
    +
    +import java.io.IOException;
    +import java.net.URI;
    +import java.util.EnumSet;
    +import java.util.Map;
    +
    +public class HdfsBolt extends AbstractHdfsBolt{
    +    private static final Logger LOG = LoggerFactory.getLogger(HdfsBolt.class);
    +
    +    private FSDataOutputStream out;
    +    private RecordFormat format;
    +    private long offset = 0;
    +
    +    public HdfsBolt withFsUrl(String fsUrl){
    +        this.fsUrl = fsUrl;
    +        return this;
    +    }
    +
    +    public HdfsBolt withConfigKey(String configKey){
    +        this.configKey = configKey;
    +        return this;
    +    }
    +
    +    public HdfsBolt withFileNameFormat(FileNameFormat fileNameFormat){
    +        this.fileNameFormat = fileNameFormat;
    +        return this;
    +    }
    +
    +    public HdfsBolt withRecordFormat(RecordFormat format){
    +        this.format = format;
    +        return this;
    +    }
    +
    +    public HdfsBolt withSyncPolicy(SyncPolicy syncPolicy){
    +        this.syncPolicy = syncPolicy;
    +        return this;
    +    }
    +
    +    public HdfsBolt withRotationPolicy(FileRotationPolicy rotationPolicy){
    +        this.rotationPolicy = rotationPolicy;
    +        return this;
    +    }
    +
    +    public HdfsBolt addRotationAction(RotationAction action){
    +        this.rotationActions.add(action);
    +        return this;
    +    }
    +
    +    @Override
    +    public void doPrepare(Map conf, TopologyContext topologyContext, OutputCollector collector) throws IOException {
    +        LOG.info("Preparing HDFS Bolt...");
    +        this.fs = FileSystem.get(URI.create(this.fsUrl), hdfsConfig);
    +    }
    +
    +    @Override
    +    public void execute(Tuple tuple) {
    +        try {
    +            byte[] bytes = this.format.format(tuple);
    +            out.write(bytes);
    +            this.offset += bytes.length;
    +
    +            this.collector.ack(tuple);
    --- End diff --
    
    Is there ever a use case to only ack the tuple after it has hit disk?  I guess that's kind of what the Trident implementations are for.
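
    For reference, a minimal sketch (not what this PR does) of acking only after a
    sync has forced the data out to the datanodes. It assumes a SyncPolicy with a
    mark(tuple, offset)/reset() contract, HdfsDataOutputStream.hsync(SyncFlag) to
    force the write, and an illustrative pendingTuples list; imports are the same
    as the diff above plus java.util.ArrayList and java.util.List:

        private final List<Tuple> pendingTuples = new ArrayList<Tuple>();

        @Override
        public void execute(Tuple tuple) {
            try {
                byte[] bytes = this.format.format(tuple);
                out.write(bytes);
                this.offset += bytes.length;
                // hold the ack until the bytes are known to be on disk
                pendingTuples.add(tuple);

                if (this.syncPolicy.mark(tuple, this.offset)) {
                    if (this.out instanceof HdfsDataOutputStream) {
                        ((HdfsDataOutputStream) this.out).hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH));
                    } else {
                        this.out.hsync();
                    }
                    this.syncPolicy.reset();
                    // the data has hit disk, so it is now safe to ack everything
                    // written since the last sync
                    for (Tuple pending : pendingTuples) {
                        this.collector.ack(pending);
                    }
                    pendingTuples.clear();
                }
            } catch (IOException e) {
                LOG.warn("write/sync failed, failing pending tuples.", e);
                // fail all un-acked tuples so the spout replays them
                for (Tuple pending : pendingTuples) {
                    this.collector.fail(pending);
                }
                pendingTuples.clear();
            }
        }

    The trade-off is extra latency and memory for the pending list, in exchange for the
    at-least-once guarantee covering data on disk rather than data in the client buffer.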


> Add module for HDFS integration
> -------------------------------
>
>                 Key: STORM-211
>                 URL: https://issues.apache.org/jira/browse/STORM-211
>             Project: Apache Storm (Incubating)
>          Issue Type: Sub-task
>            Reporter: P. Taylor Goetz
>
> Add a module with generic components (storm, trident) for interacting with 
> HDFS:
> - Write to regular and sequence files
> - Core bolts, and Trident state implementation.
> - Integrate with secure (kerberos-enabled) HDFS
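
For reference, wiring the proposed core bolt into a topology would look roughly like
the sketch below. The concrete format/policy implementations (DelimitedRecordFormat,
DefaultFileNameFormat, CountSyncPolicy, FileSizeRotationPolicy) and the SentenceSpout
are illustrative assumptions, not part of the quoted diff:

    // build up the bolt with the fluent setters shown in the diff above
    RecordFormat format = new DelimitedRecordFormat().withFieldDelimiter("|");
    SyncPolicy syncPolicy = new CountSyncPolicy(1000);        // sync to HDFS every 1000 tuples
    FileRotationPolicy rotationPolicy =
            new FileSizeRotationPolicy(5.0f, FileSizeRotationPolicy.Units.MB); // rotate files at 5 MB
    FileNameFormat fileNameFormat = new DefaultFileNameFormat().withPath("/tmp/storm-hdfs/");

    HdfsBolt bolt = new HdfsBolt()
            .withFsUrl("hdfs://localhost:8020")
            .withFileNameFormat(fileNameFormat)
            .withRecordFormat(format)
            .withRotationPolicy(rotationPolicy)
            .withSyncPolicy(syncPolicy);

    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("sentence-spout", new SentenceSpout());            // hypothetical spout
    builder.setBolt("hdfs-bolt", bolt, 4).shuffleGrouping("sentence-spout");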



--
This message was sent by Atlassian JIRA
(v6.2#6252)
