HDFS-8891. HDFS concat should keep srcs order. Contributed by Yong Zhang.

Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/dc7a0616
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/dc7a0616
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/dc7a0616

Branch: refs/heads/HDFS-7285
Commit: dc7a061668a3f4d86fe1b07a40d46774b5386938
Parents: 2bc0a4f
Author: Jing Zhao <ji...@apache.org>
Authored: Fri Aug 14 14:42:43 2015 -0700
Committer: Jing Zhao <ji...@apache.org>
Committed: Fri Aug 14 14:42:43 2015 -0700

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |  2 ++
 .../hdfs/server/namenode/FSDirConcatOp.java     |  5 ++--
 .../hdfs/server/namenode/TestHDFSConcat.java    | 24 ++++++++++++++++----
 3 files changed, 24 insertions(+), 7 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/dc7a0616/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index be799af..20b5467 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1141,6 +1141,8 @@ Release 2.8.0 - UNRELEASED
 
     HDFS-8565. Typo in dfshealth.html - Decomissioning. (nijel via xyao)
 
+    HDFS-8891. HDFS concat should keep srcs order. (Yong Zhang via jing9)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dc7a0616/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirConcatOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirConcatOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirConcatOp.java
index bb00130..786284d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirConcatOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirConcatOp.java
@@ -18,6 +18,7 @@
 package org.apache.hadoop.hdfs.server.namenode;
 
 import com.google.common.base.Preconditions;
+
 import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.StorageType;
@@ -28,7 +29,7 @@ import org.apache.hadoop.hdfs.protocol.SnapshotException;
 
 import java.io.IOException;
 import java.util.Arrays;
-import java.util.HashSet;
+import java.util.LinkedHashSet;
 import java.util.Set;
 import java.util.List;
 
@@ -103,7 +104,7 @@ class FSDirConcatOp {
   private static INodeFile[] verifySrcFiles(FSDirectory fsd, String[] srcs,
       INodesInPath targetIIP, FSPermissionChecker pc) throws IOException {
     // to make sure no two files are the same
-    Set<INodeFile> si = new HashSet<>();
+    Set<INodeFile> si = new LinkedHashSet<>();
     final INodeFile targetINode = targetIIP.getLastINode().asFile();
     final INodeDirectory targetParent = targetINode.getParent();
     // now check the srcs

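The functional change is in verifySrcFiles: the source INodes are collected into a Set to
reject duplicate files, but HashSet iterates in hash order rather than insertion order, so
concat could stitch the blocks together in a different order than the srcs array the caller
passed in. LinkedHashSet keeps the insertion order while still detecting duplicates. A
standalone illustration of the difference (not part of the patch):

    import java.util.Arrays;
    import java.util.HashSet;
    import java.util.LinkedHashSet;
    import java.util.Set;

    public class SrcOrderDemo {
      public static void main(String[] args) {
        // Caller's order: 3, 1, 2
        Set<Integer> hash = new HashSet<>(Arrays.asList(3, 1, 2));
        Set<Integer> linked = new LinkedHashSet<>(Arrays.asList(3, 1, 2));

        System.out.println(hash);   // hash order, e.g. [1, 2, 3]
        System.out.println(linked); // insertion order: [3, 1, 2]
      }
    }
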
http://git-wip-us.apache.org/repos/asf/hadoop/blob/dc7a0616/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestHDFSConcat.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestHDFSConcat.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestHDFSConcat.java
index e1c3c0f..4685eb9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestHDFSConcat.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestHDFSConcat.java
@@ -111,18 +111,21 @@ public class TestHDFSConcat {
     long trgBlocks = nn.getBlockLocations(trg, 0, trgLen).locatedBlockCount();
        
     Path [] files = new Path[numFiles];
-    byte [] [] bytes = new byte [numFiles][(int)fileLen];
+    byte[][] bytes = new byte[numFiles + 1][(int) fileLen];
     LocatedBlocks [] lblocks = new LocatedBlocks[numFiles];
     long [] lens = new long [numFiles];
     
-    
+    stm = dfs.open(trgPath);
+    stm.readFully(0, bytes[0]);
+    stm.close();
     int i;
     for(i=0; i<files.length; i++) {
       files[i] = new Path("/file"+i);
       Path path = files[i];
       System.out.println("Creating file " + path);
-      DFSTestUtil.createFile(dfs, path, fileLen, REPL_FACTOR, 1);
-    
+
+      // make files with different content
+      DFSTestUtil.createFile(dfs, path, fileLen, REPL_FACTOR, i);
       fStatus = nn.getFileInfo(path.toUri().getPath());
       lens[i] = fStatus.getLen();
       assertEquals(trgLen, lens[i]); // file of the same length.
@@ -131,7 +134,7 @@ public class TestHDFSConcat {
       
       //read the file
       stm = dfs.open(path);
-      stm.readFully(0, bytes[i]);
+      stm.readFully(0, bytes[i + 1]);
       //bytes[i][10] = 10;
       stm.close();
     }
@@ -153,6 +156,17 @@ public class TestHDFSConcat {
     // check count update
     ContentSummary cBefore = dfs.getContentSummary(trgPath.getParent());
     
+    // resort file array, make INode id not sorted.
+    for (int j = 0; j < files.length / 2; j++) {
+      Path tempPath = files[j];
+      files[j] = files[files.length - 1 - j];
+      files[files.length - 1 - j] = tempPath;
+
+      byte[] tempBytes = bytes[1 + j];
+      bytes[1 + j] = bytes[files.length - 1 - j + 1];
+      bytes[files.length - 1 - j + 1] = tempBytes;
+    }
+
     // now concatenate
     dfs.concat(trgPath, files);
     

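On the test side, the target's original content is read into bytes[0], each source file is
created with a distinct seed (i) so the contents differ, and the files array is reversed
before the concat so the srcs are no longer in creation (INode id) order. After
dfs.concat(trgPath, files) the target should contain bytes[0] followed by the source
contents in exactly the order of the reordered arrays. A minimal sketch of such a check,
assuming trgLen equals each source's length and that java.util.Arrays and JUnit's
assertArrayEquals are available in the test class (the hunk above ends before the test's
own verification):

    FSDataInputStream in = dfs.open(trgPath);
    byte[] full = new byte[(int) trgLen * (numFiles + 1)];
    in.readFully(0, full);
    in.close();

    int off = 0;
    for (byte[] expected : bytes) {
      // each chunk of the concatenated target must match the corresponding src, in order
      byte[] actual = Arrays.copyOfRange(full, off, off + expected.length);
      assertArrayEquals(expected, actual);
      off += expected.length;
    }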