ramkrish86 commented on code in PR #21508:
URL: https://github.com/apache/flink/pull/21508#discussion_r1052089932
########## flink-filesystems/flink-azure-fs-hadoop/src/main/java/org/apache/flink/fs/azurefs/AzureBlobFsRecoverableDataOutputStream.java: ##########
@@ -0,0 +1,299 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.fs.azurefs;
+
+import org.apache.flink.annotation.Internal;
+import org.apache.flink.core.fs.RecoverableFsDataOutputStream;
+import org.apache.flink.core.fs.RecoverableWriter;
+import org.apache.flink.runtime.fs.hdfs.HadoopFsRecoverable;
+
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.FileNotFoundException;
+import java.io.IOException;
+
+import static org.apache.flink.util.Preconditions.checkNotNull;
+
+/**
+ * An implementation of the {@link RecoverableFsDataOutputStream} for AzureBlob's file system
+ * abstraction.
+ */
+@Internal
+public class AzureBlobFsRecoverableDataOutputStream extends RecoverableFsDataOutputStream {
+
+    private static final Logger LOG =
+            LoggerFactory.getLogger(AzureBlobFsRecoverableDataOutputStream.class);
+
+    private final FileSystem fs;
+
+    private final Path targetFile;
+
+    private final Path tempFile;
+
+    private final FSDataOutputStream out;
+
+    // Not final so that tests can override it.
+    public static int minBufferLength = 2097152;
+
+    // Initialized to 0. Whenever recovery is done, this is added to the pos.
+    private long initialFileSize = 0;
+
+    public AzureBlobFsRecoverableDataOutputStream(FileSystem fs, Path targetFile, Path tempFile)
+            throws IOException {
+        this.fs = checkNotNull(fs);
+        this.targetFile = checkNotNull(targetFile);
+        LOG.debug("The targetFile is {}", targetFile.getName());
+        this.tempFile = checkNotNull(tempFile);
+        LOG.debug("The tempFile is {}", tempFile.getName());
+        this.out = fs.create(tempFile);
+    }
+
+    public AzureBlobFsRecoverableDataOutputStream(FileSystem fs, HadoopFsRecoverable recoverable)
+            throws IOException {
+        this.fs = checkNotNull(fs);
+        this.targetFile = checkNotNull(recoverable.targetFile());
+        this.tempFile = checkNotNull(recoverable.tempFile());
+        long len = fs.getFileStatus(tempFile).getLen();
+        LOG.info("The recoverable offset is {} and the file len is {}", recoverable.offset(), len);
+        // Happens when we recover from a previously committed offset. Otherwise this is not
+        // really needed.
+        if (len > recoverable.offset()) {
+            truncate(fs, recoverable);
+        } else if (len < recoverable.offset()) {
+            LOG.error(
+                    "Temp file length {} is less than the expected recoverable offset {}",
+                    len,
+                    recoverable.offset());
+            throw new IOException(
+                    "Unable to create recoverable outputstream as length of file "
+                            + len
+                            + " is less than recoverable offset "
+                            + recoverable.offset());
+        }
+        // In ABFS, append does not account for the initial file size the way DFS does,
+        // so we store it explicitly and add it to the position on every persist call.
+        initialFileSize = fs.getFileStatus(tempFile).getLen();
+        out = fs.append(tempFile);
+        LOG.debug("Created a new OS for appending {}", tempFile);
+    }
+
+    private void truncate(FileSystem fs, HadoopFsRecoverable recoverable) throws IOException {
+        Path renameTempPath = new Path(tempFile.toString() + ".rename");
+        try {
+            LOG.info(
+                    "Creating the temp rename file {} for truncating the tempFile {}",
+                    renameTempPath,
+                    tempFile);
+            FSDataOutputStream fsDataOutputStream = fs.create(renameTempPath);
+            LOG.info("Opening the tempFile {} for truncate", tempFile);
+            FSDataInputStream fsDis = fs.open(tempFile);
+            // 2 MB buffers. TODO : Make this configurable
+            long remaining = recoverable.offset();
+            byte[] buf = null;
+            long dataWritten = 0;
+            while (remaining != 0) {
+                if (minBufferLength < remaining) {
+                    buf = new byte[minBufferLength];
+                } else {
+                    buf = new byte[(int) remaining];
+                }
+                // readFully is used because a plain read() may fill the buffer only partially.
+                fsDis.readFully(buf, 0, buf.length);
+                remaining -= buf.length;
+                LOG.info("Bytes remaining to read {}", remaining);
+                fsDataOutputStream.write(buf);
+                dataWritten += buf.length;
+                LOG.info("Successfully wrote {} bytes of data", dataWritten);
+            }
+            // TODO : Support intermediate flush?
+            LOG.info("Closing the temp rename file {}", renameTempPath);
+            fsDataOutputStream.close();
+            fsDis.close();
+        } catch (IOException e) {
+            LOG.error(
+                    "Unable to recover. Exception while trying to truncate the temp file {}",
+                    tempFile);
+            // We cannot recover. Should this be controllable if the user does not want it?
+            throw e;
+        }
+        try {
+            LOG.info("Deleting the actual temp file {}", tempFile);
+            fs.delete(tempFile, false);
+        } catch (IOException e) {
+            LOG.error("Unable to recover. Error while deleting the temp file {}", tempFile);
+            // Unable to recover.
+            throw e;
+        }
+        rename(fs, renameTempPath);
+    }
+
+    private void rename(FileSystem fs, Path renameTempPath) throws IOException {
+        LOG.info("Renaming the temp rename file {} back to tempFile {}", renameTempPath, tempFile);
+        try {
+            boolean result = fs.rename(renameTempPath, tempFile);
+            if (!result) {
+                LOG.error(
+                        "Unable to recover. Rename operation failed {} to {}",
+                        renameTempPath,
+                        tempFile);
+                throw new IOException("Unable to recover. Rename operation failed");
+            } else {
+                LOG.info("Rename was successful");
+            }
+        } catch (IOException e) {
+            LOG.error(
+                    "Unable to recover. Renaming of tempFile did not happen after truncating {} to {}",
+                    renameTempPath,
+                    tempFile);
+            throw e;
+        }
+    }
+
+    @Override
+    public void write(byte[] b, int off, int len) throws IOException {
+        out.write(b, off, len);
+    }
+
+    @Override
+    public long getPos() throws IOException {
+        return out.getPos();
+    }
+
+    @Override
+    public void write(int b) throws IOException {
+        out.write(b);
+    }
+
+    @Override
+    public void flush() throws IOException {
+        out.hflush();
+    }
+
+    @Override
+    public void sync() throws IOException {
+        out.hflush();
+        out.hsync();
+    }
+
+    @Override
+    public RecoverableWriter.ResumeRecoverable persist() throws IOException {
+        sync();
+        return new HadoopFsRecoverable(targetFile, tempFile, getPos() + initialFileSize);
+    }
+
+    @Override
+    public Committer closeForCommit() throws IOException {
+        final long pos = getPos();
+        close();
+        return new AzureBlobFsRecoverableDataOutputStream.ABFSCommitter(
+                fs, new HadoopFsRecoverable(targetFile, tempFile, pos + initialFileSize));
+    }
+
+    @Override
+    public void close() throws IOException {
+        out.close();
+    }
+
+    // ------------------------------------------------------------------------
+    //  Committer
+    // ------------------------------------------------------------------------
+
+    /**
+     * Implementation of a committer for the Hadoop File System abstraction. This implementation
+     * commits by renaming the temp file to the final file path. The temp file is truncated before
+     * renaming in case there is trailing garbage data.
+     */
+    static class ABFSCommitter implements Committer {
+
+        private final FileSystem fs;
+        private final HadoopFsRecoverable recoverable;
+
+        ABFSCommitter(FileSystem fs, HadoopFsRecoverable recoverable) {
+            this.fs = checkNotNull(fs);
+            this.recoverable = checkNotNull(recoverable);
+        }
+
+        @Override
+        public void commit() throws IOException {
+            final Path src = recoverable.tempFile();
+            final Path dest = recoverable.targetFile();
+            final long expectedLength = recoverable.offset();
+            FileStatus srcStatus = null;
+            try {
+                srcStatus = fs.getFileStatus(src);
+            } catch (FileNotFoundException fnfe) {
+                // srcStatus stays null; handled below
+            } catch (IOException e) {
+                throw new IOException(
+                        "Cannot clean commit: Failed to get the status of the staging file.", e);
+            }
+            if (srcStatus != null) {
+                LOG.debug(
+                        "The srcStatus is {} and exp length is {}",
+                        srcStatus.getLen(),
+                        expectedLength);
+                if (srcStatus.getLen() != expectedLength) {
+                    LOG.error(
+                            "The src file {} with length {} does not match the expected length {}",
+                            src,
+                            srcStatus.getLen(),
+                            expectedLength);
+                    throw new IOException(
+                            "The src file "
+                                    + src
+                                    + " with length "
+                                    + srcStatus.getLen()
+                                    + " does not match the expected length "
+                                    + expectedLength);
+                }
+                try {
+                    fs.rename(src, dest);
+                } catch (IOException e) {
+                    throw new IOException(
+                            "Committing file by rename failed: " + src + " to " + dest, e);
+                }
+            } else if (!fs.exists(dest)) {
+                // Neither exists - that can be a sign of
+                //   (1) a serious problem (file system loss of data)
+                //   (2) a recovery of a savepoint that is some time old and the users
+                //       removed the files in the meantime.
+
+                // TODO how to handle this?
+                // We probably need an option for users whether this should log,
+                // or result in an exception or unrecoverable exception

Review Comment:
   This I did not handle and left it as it is in the HadoopRecoverableDataOS.java. Fine, I can throw the exception here.
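   A minimal sketch of how throwing in that branch could look (illustrative only, not code from the PR; the exact message and the choice of a plain IOException over an unrecoverable one are assumptions):

       } else if (!fs.exists(dest)) {
           // Sketch of the suggestion above: fail fast, since neither the staging
           // file nor the target file exists and the outcome of the commit can no
           // longer be determined.
           throw new IOException(
                   "Cannot commit: neither the staging file "
                           + src
                           + " nor the target file "
                           + dest
                           + " exists.");
       }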
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]
