mukund-thakur commented on code in PR #6543: URL: https://github.com/apache/hadoop/pull/6543#discussion_r1572754351
########## hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/MkdirOperation.java: ########## @@ -124,7 +138,32 @@ public Boolean execute() throws IOException { return true; } - // Walk path to root, ensuring closest ancestor is a directory, not file + // if performance creation mode is set, no need to check + // whether the closest ancestor is dir. + if (!performanceCreation) { + verifyFileStatusOfClosestAncestor(); + } + + // if we get here there is no directory at the destination. + // so create one. + + // Create the marker file, delete the parent entries + // if the filesystem isn't configured to retain them + callbacks.createFakeDirectory(dir, false); + return true; + } + + /** + * Verify the file status of the closest ancestor, if it is + * dir, the mkdir operation should proceed. If it is file, + * the mkdir operation should throw error. + * + * @throws IOException If either file status could not be retrieved, + * or if the closest ancestor is a file. + */ + private void verifyFileStatusOfClosestAncestor() throws IOException { + FileStatus fileStatus; + // Walk path to root, ensuring the closest ancestor is a directory, not file Path fPart = dir.getParent(); try { while (fPart != null && !fPart.isRoot()) { Review Comment: I have a basic question here. Shouldn't we just be checking the immediate (one-level) parent? For example, if we are trying to create a/b/c/d/, then a/b/c/ should exist and must not be a file. -- This is an automated message from the Apache Git Service. To respond to the message, please log on to GitHub and use the URL above to go to the specific comment. To unsubscribe, e-mail: common-issues-unsubscribe@hadoop.apache.org For queries about this service, please contact Infrastructure at: users@infra.apache.org --------------------------------------------------------------------- To unsubscribe, e-mail: common-issues-unsubscribe@hadoop.apache.org For additional commands, e-mail: common-issues-help@hadoop.apache.org