Repository: hadoop
Updated Branches:
refs/heads/branch-2.7 25b99e481 -> 86c01d90b
HDFS-8480. Fix performance and timeout issues in HDFS-7929 by using hard-links
to preserve old edit logs, instead of copying them. (Zhe Zhang via Colin P.
McCabe)
(cherry picked from commit 7b424f938c3c306795d574792b086d84e4f06425)
(cherry picked from commit cbd11681ce8a51d187d91748b67a708681e599de)
Conflicts:
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgrade.java
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/86c01d90
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/86c01d90
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/86c01d90
Branch: refs/heads/branch-2.7
Commit: 86c01d90ba9eaf92c6c444b9a0dde54131696102
Parents: 25b99e4
Author: Colin Patrick Mccabe <[email protected]>
Authored: Mon Jun 22 14:46:57 2015 -0700
Committer: Colin Patrick Mccabe <[email protected]>
Committed: Mon Jun 22 14:46:57 2015 -0700
----------------------------------------------------------------------
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 +
.../hdfs/server/namenode/NNUpgradeUtil.java | 18 +-----
.../org/apache/hadoop/hdfs/TestDFSUpgrade.java | 59 +++++++++++++++-----
3 files changed, 49 insertions(+), 31 deletions(-)
----------------------------------------------------------------------
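The core of the change: instead of copying each preserved edit log byte-for-byte
during the NameNode upgrade, a single hard link per file is created. Linking is a
constant-time metadata operation, while the old path re-serialized every edit log
op and could time out on large logs. A minimal sketch of the difference using only
java.nio.file (illustrative only, not the Hadoop code; names are made up):

import java.io.File;
import java.io.IOException;
import java.nio.file.Files;

public class LinkVsCopy {
  // Preserve src under dstDir. Copying pushes every byte through the JVM;
  // linking just adds a second directory entry for the same inode.
  public static void preserve(File src, File dstDir, boolean useLink)
      throws IOException {
    File dst = new File(dstDir, src.getName());
    if (useLink) {
      Files.createLink(dst.toPath(), src.toPath()); // O(1), no data copied
    } else {
      Files.copy(src.toPath(), dst.toPath());       // O(size of src)
    }
  }
}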
http://git-wip-us.apache.org/repos/asf/hadoop/blob/86c01d90/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 951a04b..9dfa3d7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -31,6 +31,9 @@ Release 2.7.1 - UNRELEASED
HDFS-7164. Feature documentation for HDFS-6581. (Arpit Agarwal)
OPTIMIZATIONS
+ HDFS-8480. Fix performance and timeout issues in HDFS-7929 by using
+ hard-links to preserve old edit logs, instead of copying them. (Zhe Zhang
+ via Colin P. McCabe)
BUG FIXES
http://git-wip-us.apache.org/repos/asf/hadoop/blob/86c01d90/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNUpgradeUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNUpgradeUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNUpgradeUtil.java
index ee651fd..1f10bc4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNUpgradeUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNUpgradeUtil.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.hdfs.server.namenode;
import java.io.File;
import java.io.FilenameFilter;
import java.io.IOException;
+import java.nio.file.Files;
import java.util.List;
import org.apache.commons.logging.Log;
@@ -127,23 +128,8 @@ public abstract class NNUpgradeUtil {
for (String s : fileNameList) {
File prevFile = new File(tmpDir, s);
- Preconditions.checkState(prevFile.canRead(),
- "Edits log file " + s + " is not readable.");
File newFile = new File(curDir, prevFile.getName());
- Preconditions.checkState(newFile.createNewFile(),
- "Cannot create new edits log file in " + curDir);
- EditLogFileInputStream in = new EditLogFileInputStream(prevFile);
- EditLogFileOutputStream out =
- new EditLogFileOutputStream(conf, newFile, 512*1024);
- FSEditLogOp logOp = in.nextValidOp();
- while (logOp != null) {
- out.write(logOp);
- logOp = in.nextOp();
- }
- out.setReadyToFlush();
- out.flushAndSync(true);
- out.close();
- in.close();
+ Files.createLink(newFile.toPath(), prevFile.toPath());
}
}
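In the loop above, the removed body opened an EditLogFileInputStream on each
preserved log and rewrote every FSEditLogOp through an EditLogFileOutputStream;
the new body is one Files.createLink call per file. A hedged sketch of the
semantics that call relies on (assumes tmpDir and curDir sit on the same POSIX
filesystem, since hard links cannot cross filesystems; the helper below is
illustrative, not part of the patch):

import java.io.File;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;

class HardLinkSemantics {
  // Link curDir/name to the existing tmpDir/name.
  static void linkEditLog(File tmpDir, File curDir, String name)
      throws IOException {
    Path prev = new File(tmpDir, name).toPath();
    Path cur = new File(curDir, name).toPath();
    // Throws FileAlreadyExistsException if cur already exists (keeping the
    // safety the old createNewFile() precondition provided) and
    // UnsupportedOperationException on filesystems without hard links.
    Files.createLink(cur, prev);
    assert Files.isSameFile(cur, prev); // two names, one inode
  }
}

Removing one name later does not affect the data reachable through the other,
which is what lets the upgrade keep the old logs without paying for a copy.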
http://git-wip-us.apache.org/repos/asf/hadoop/blob/86c01d90/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgrade.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgrade.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgrade.java
index f0a094e..596cc5f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgrade.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgrade.java
@@ -30,14 +30,18 @@ import static org.junit.Assert.fail;
import java.io.File;
import java.io.FilenameFilter;
import java.io.IOException;
-import java.util.LinkedList;
+import java.nio.file.Files;
import java.util.List;
import java.util.regex.Pattern;
+import com.google.common.base.Preconditions;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.inotify.Event;
+import org.apache.hadoop.hdfs.inotify.EventBatch;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.RollingUpgradeAction;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType;
@@ -45,7 +49,11 @@ import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
import org.apache.hadoop.hdfs.server.common.InconsistentFSStateException;
import org.apache.hadoop.hdfs.server.common.Storage;
import org.apache.hadoop.hdfs.server.common.StorageInfo;
+import org.apache.hadoop.hdfs.server.namenode.EditLogFileInputStream;
+import org.apache.hadoop.hdfs.server.namenode.EditLogFileOutputStream;
+import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp;
import org.apache.hadoop.hdfs.server.namenode.NNStorage;
+import org.apache.hadoop.hdfs.server.namenode.NameNodeLayoutVersion;
import org.apache.hadoop.hdfs.server.namenode.TestParallelImageWrite;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.ipc.RemoteException;
@@ -54,6 +62,8 @@ import org.junit.BeforeClass;
import org.junit.Ignore;
import org.junit.Test;
+import static org.apache.hadoop.hdfs.inotify.Event.CreateEvent;
+
import com.google.common.base.Charsets;
import com.google.common.base.Joiner;
@@ -466,31 +476,50 @@ public class TestDFSUpgrade {
log("Normal NameNode upgrade", 1);
File[] created =
UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");
- List<String> beforeUpgrade = new LinkedList<>();
for (final File createdDir : created) {
List<String> fileNameList =
IOUtils.listDirectory(createdDir, EditLogsFilter.INSTANCE);
- beforeUpgrade.addAll(fileNameList);
+ for (String fileName : fileNameList) {
+ String tmpFileName = fileName + ".tmp";
+ File existingFile = new File(createdDir, fileName);
+ File tmpFile = new File(createdDir, tmpFileName);
+ Files.move(existingFile.toPath(), tmpFile.toPath());
+ File newFile = new File(createdDir, fileName);
+ Preconditions.checkState(newFile.createNewFile(),
+ "Cannot create new edits log file in " + createdDir);
+ EditLogFileInputStream in = new EditLogFileInputStream(tmpFile,
+ HdfsConstants.INVALID_TXID, HdfsConstants.INVALID_TXID,
+ false);
+ EditLogFileOutputStream out = new EditLogFileOutputStream(conf, newFile,
+ (int)tmpFile.length());
+ out.create(NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION + 1);
+ FSEditLogOp logOp = in.readOp();
+ while (logOp != null) {
+ out.write(logOp);
+ logOp = in.readOp();
+ }
+ out.setReadyToFlush();
+ out.flushAndSync(true);
+ out.close();
+ Files.delete(tmpFile.toPath());
+ }
}
cluster = createCluster();
- List<String> afterUpgrade = new LinkedList<>();
- for (final File createdDir : created) {
- List<String> fileNameList =
- IOUtils.listDirectory(createdDir, EditLogsFilter.INSTANCE);
- afterUpgrade.addAll(fileNameList);
- }
-
- for (String s : beforeUpgrade) {
- assertTrue(afterUpgrade.contains(s));
- }
-
+ DFSInotifyEventInputStream ieis =
+ cluster.getFileSystem().getInotifyEventStream(0);
+ EventBatch batch = ieis.poll();
+ Event[] events = batch.getEvents();
+ assertTrue("Should be able to get transactions before the upgrade.",
+ events.length > 0);
+ assertEquals(events[0].getEventType(), Event.EventType.CREATE);
+ assertEquals(((CreateEvent) events[0]).getPath(), "/TestUpgrade");
cluster.shutdown();
UpgradeUtilities.createEmptyDirs(nameNodeDirs);
}
- private static enum EditLogsFilter implements FilenameFilter {
+ private enum EditLogsFilter implements FilenameFilter {
INSTANCE;
@Override
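The rewritten test no longer compares file name lists before and after the
upgrade. It first rewrites each edit log through EditLogFileOutputStream under
an older layout version (CURRENT_LAYOUT_VERSION + 1; HDFS layout versions are
negative, so adding one yields the previous version), forcing a real upgrade
path, and then reads the edit log back through the inotify stream to prove the
pre-upgrade transactions are still intact. A minimal sketch of that
verification step, assuming a running MiniDFSCluster named cluster as in the
test (the helper class is illustrative, not part of the patch):

import org.apache.hadoop.hdfs.DFSInotifyEventInputStream;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.inotify.Event;
import org.apache.hadoop.hdfs.inotify.EventBatch;

class InotifyCheck {
  // Replay edits from transaction id 0; if the old transactions survived
  // the upgrade, the first batch contains the initial CREATE.
  static void assertOldTxnsVisible(MiniDFSCluster cluster) throws Exception {
    DFSInotifyEventInputStream ieis =
        cluster.getFileSystem().getInotifyEventStream(0);
    EventBatch batch = ieis.poll(); // may be null if nothing is available
    if (batch == null || batch.getEvents().length == 0) {
      throw new AssertionError("no transactions readable after upgrade");
    }
    Event first = batch.getEvents()[0];
    if (first.getEventType() == Event.EventType.CREATE) {
      System.out.println(((Event.CreateEvent) first).getPath());
    }
  }
}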