[
https://issues.apache.org/jira/browse/APEXMALHAR-2013?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=15201189#comment-15201189
]
ASF GitHub Bot commented on APEXMALHAR-2013:
--------------------------------------------
Github user DT-Priyanka commented on a diff in the pull request:
https://github.com/apache/incubator-apex-malhar/pull/216#discussion_r56626208
--- Diff: library/src/main/java/com/datatorrent/lib/io/fs/FileStitcher.java
---
@@ -0,0 +1,408 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package com.datatorrent.lib.io.fs;
+
+import java.io.IOException;
+import java.io.OutputStream;
+import java.util.Queue;
+
+import javax.validation.constraints.NotNull;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.PathFilter;
+
+import com.google.common.collect.Queues;
+
+import com.datatorrent.api.Context;
+import com.datatorrent.api.Context.DAGContext;
+import com.datatorrent.api.DefaultOutputPort;
+import com.datatorrent.api.annotation.OutputPortFieldAnnotation;
+import com.datatorrent.lib.io.fs.Synchronizer.StitchBlock;
+import com.datatorrent.lib.io.fs.Synchronizer.StitchedFileMetaData;
+
+/**
+ * This is a generic File Stitcher which can be used to merge data from one
+ * or more files into a single stitched file. StitchedFileMetaData defines
+ * constituents of the stitched file.
+ *
+ * This class uses Reconciler to
+ */
+public class FileStitcher<T extends StitchedFileMetaData> extends AbstractReconciler<T, T>
+{
+ /**
+ * Filesystem on which application is running
+ */
+ protected transient FileSystem appFS;
+
+ /**
+ * Destination file system
+ */
+ protected transient FileSystem outputFS;
+
+ /**
+ * Path for destination directory
+ */
+ @NotNull
+ protected String filePath;
+
+ /**
+ * Path for blocks directory
+ */
+ protected transient String blocksDir;
+
+ protected static final String PART_FILE_EXTENTION = "._COPYING_";
+
+ /**
+ * Queue maintaining successful files
+ */
+ protected Queue<T> successfulFiles = Queues.newLinkedBlockingQueue();
+
+ /**
+ * Queue maintaining skipped files
+ */
+ protected Queue<T> skippedFiles = Queues.newLinkedBlockingQueue();
+
+ /**
+ * Queue maintaining failed files
+ */
+ protected Queue<T> failedFiles = Queues.newLinkedBlockingQueue();
+
+ /**
+ * Output port for emitting completed stitched files metadata
+ */
+ @OutputPortFieldAnnotation(optional = true)
+ public final transient DefaultOutputPort<T> completedFilesMetaOutput = new DefaultOutputPort<T>();
+
+ private boolean writeChecksum = true;
+ protected transient Path tempOutFilePath;
+
+ @Override
+ public void setup(Context.OperatorContext context)
+ {
+
+ blocksDir = context.getValue(DAGContext.APPLICATION_PATH) + Path.SEPARATOR + BlockWriter.SUBDIR_BLOCKS;
--- End diff --
Same as above — can we make this blocksDir parameter configurable?
> HDFS output module for file copy
> --------------------------------
>
> Key: APEXMALHAR-2013
> URL: https://issues.apache.org/jira/browse/APEXMALHAR-2013
> Project: Apache Apex Malhar
> Issue Type: Task
> Reporter: Yogi Devendra
> Assignee: Yogi Devendra
>
> To write files to HDFS using block-by-block approach.
> Main use-case being to copy the files. Thus, original sequence of blocks has
> to be maintained.
> To achieve this goal, this module would use information emitted by HDFS
> input module (APEXMALHAR-2008) viz. FileMetaData, BlockMetaData, BlockData.
>
--
This message was sent by Atlassian JIRA
(v6.3.4#6332)