zhaohehuhu commented on code in PR #2579:
URL: https://github.com/apache/celeborn/pull/2579#discussion_r1682181106
##########
worker/src/main/java/org/apache/celeborn/service/deploy/worker/storage/PartitionDataWriter.java:
##########
@@ -166,24 +170,32 @@ public PartitionDataWriter(
}
public void initFileChannelsForDiskFile() throws IOException {
- if (!this.diskFileInfo.isHdfs()) {
+ if (!this.diskFileInfo.isDFS()) {
this.flusherBufferSize = localFlusherBufferSize;
channel =
FileChannelUtils.createWritableFileChannel(this.diskFileInfo.getFilePath());
} else {
- this.flusherBufferSize = hdfsFlusherBufferSize;
-    // We open the stream and close immediately because HDFS output stream will
+ FileSystem hadoopFs = null;
Review Comment:
OK. Done
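
For reference, a minimal sketch (not this PR's code) of what the rewritten DFS branch could look like once the HDFS-only check becomes a generic DFS check. The resolveDfsFileSystem helper below is hypothetical; the actual change wires the FileSystem through StorageManager instead.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

class DfsFileInitSketch {
  // Hypothetical helper: resolve whichever FileSystem (hdfs://, s3a://, ...) backs the path.
  static FileSystem resolveDfsFileSystem(String filePath) throws IOException {
    return new Path(filePath).getFileSystem(new Configuration());
  }

  static void initDfsFile(String dfsFilePath) throws IOException {
    FileSystem hadoopFs = resolveDfsFileSystem(dfsFilePath);
    // Open the stream and close it immediately so the file exists on the DFS
    // before any flusher starts appending to it.
    hadoopFs.create(new Path(dfsFilePath), true).close();
  }
}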
##########
worker/src/main/java/org/apache/celeborn/service/deploy/worker/storage/PartitionFilesSorter.java:
##########
@@ -588,25 +597,28 @@ public DiskFileInfo resolve(
fileId,
() -> {
FileChannel indexChannel = null;
- FSDataInputStream hdfsIndexStream = null;
- boolean isHdfs = Utils.isHdfsPath(indexFilePath);
+ FSDataInputStream dfsIndexStream = null;
+ boolean isDfs = Utils.isHdfsPath(indexFilePath) || Utils.isS3Path(indexFilePath);
+ boolean isS3 = Utils.isS3Path(indexFilePath);
int indexSize;
try {
- if (isHdfs) {
- hdfsIndexStream = StorageManager.hadoopFs().open(new Path(indexFilePath));
- indexSize =
- (int)
- StorageManager.hadoopFs()
- .getFileStatus(new Path(indexFilePath))
- .getLen();
+ if (isDfs) {
Review Comment:
OK. Done
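
Sketched below, for illustration only, is how the sorter's resolve path might read the index length on either HDFS or S3 once the isDfs/isS3 flags are in place. resolveDfsFileSystem is the same hypothetical helper as in the sketch above, not an API from this PR.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

class DfsIndexSizeSketch {
  // Hypothetical: resolve the FileSystem from the path's scheme.
  static FileSystem resolveDfsFileSystem(String filePath) throws IOException {
    return new Path(filePath).getFileSystem(new Configuration());
  }

  static int readDfsIndexSize(String indexFilePath) throws IOException {
    Path indexPath = new Path(indexFilePath);
    FileSystem hadoopFs = resolveDfsFileSystem(indexFilePath);
    // The real code also keeps an FSDataInputStream open (hadoopFs.open(indexPath))
    // to read index entries later; only the length lookup is shown here.
    // The cast mirrors the diff: index files are small enough to fit in an int.
    return (int) hadoopFs.getFileStatus(indexPath).getLen();
  }
}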
##########
worker/src/main/java/org/apache/celeborn/service/deploy/worker/storage/PartitionFilesSorter.java:
##########
@@ -673,11 +690,17 @@ class FileSorter {
indexFile.delete();
}
} else {
- if (StorageManager.hadoopFs().exists(fileInfo.getHdfsSortedPath())) {
- StorageManager.hadoopFs().delete(fileInfo.getHdfsSortedPath(), false);
+ FileSystem hadoopFs = null;
Review Comment:
OK. Done
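
A hedged sketch of the cleanup branch: with a FileSystem resolved for the file's actual storage (HDFS or S3) rather than the HDFS-only StorageManager.hadoopFs() call, the delete logic itself stays the same. Method and path names here are illustrative.

import java.io.IOException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

class DfsSortedFileCleanupSketch {
  // Delete the sorted data/index files on whichever DFS backs them.
  static void deleteSortedDfsFiles(FileSystem hadoopFs, Path sortedPath, Path indexPath)
      throws IOException {
    if (hadoopFs.exists(sortedPath)) {
      hadoopFs.delete(sortedPath, /* recursive = */ false);
    }
    if (hadoopFs.exists(indexPath)) {
      hadoopFs.delete(indexPath, /* recursive = */ false);
    }
  }
}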
##########
worker/src/main/java/org/apache/celeborn/service/deploy/worker/storage/ReducePartitionDataWriter.java:
##########
@@ -88,17 +90,20 @@ public synchronized long close() throws IOException {
},
() -> {
if (diskFileInfo != null) {
- if (diskFileInfo.isHdfs()) {
- if (StorageManager.hadoopFs()
- .exists(diskFileInfo.getHdfsPeerWriterSuccessPath())) {
- StorageManager.hadoopFs().delete(diskFileInfo.getHdfsPath(), false);
+ if (diskFileInfo.isDFS()) {
Review Comment:
OK. Done
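
Finally, an illustrative sketch of the close() cleanup once it is DFS-agnostic: if the peer writer has already published its success marker, the partially written DFS file is removed. The path parameters are hypothetical names, not necessarily the renamed getters in this PR.

import java.io.IOException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

class ReducePartitionCloseCleanupSketch {
  static void cleanupIfPeerSucceeded(FileSystem hadoopFs, Path peerSuccessPath, Path dataPath)
      throws IOException {
    // The peer writer already committed its copy, so this writer's file is redundant.
    if (hadoopFs.exists(peerSuccessPath)) {
      hadoopFs.delete(dataPath, /* recursive = */ false);
    }
  }
}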
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]