This is an automated email from the ASF dual-hosted git repository.
weichiu pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git
The following commit(s) were added to refs/heads/trunk by this push:
new 33c62f8 HDFS-14497. Write lock held by metasave impact following RPC processing. Contributed by He Xiaoqiao.
33c62f8 is described below
commit 33c62f8f4e94442825fe286c2b18518925d980e6
Author: He Xiaoqiao <[email protected]>
AuthorDate: Thu May 30 13:27:48 2019 -0700
HDFS-14497. Write lock held by metasave impact following RPC processing. Contributed by He Xiaoqiao.
Signed-off-by: Wei-Chiu Chuang <[email protected]>
---
.../hdfs/server/blockmanagement/BlockManager.java | 2 +-
.../hadoop/hdfs/server/namenode/FSNamesystem.java | 28 ++++++----
.../hadoop/hdfs/server/namenode/TestMetaSave.java | 60 ++++++++++++++++++++++
3 files changed, 79 insertions(+), 11 deletions(-)
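For readers skimming the diff below: the fix takes the namesystem read lock instead of the write lock for metaSave, so other RPC handlers are no longer blocked behind a long dump, and it adds a private metaSaveLock so concurrent metaSave calls writing the same output file do not interleave. A minimal sketch of that pattern follows; the class and field names (ReadLockedDumper, rwLock, dumpLock) are illustrative and are not the actual FSNamesystem code.

import java.io.BufferedWriter;
import java.io.File;
import java.io.IOException;
import java.io.OutputStreamWriter;
import java.io.PrintWriter;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.util.concurrent.locks.ReentrantReadWriteLock;

/** Illustrative sketch only; not the actual FSNamesystem code. */
class ReadLockedDumper {
  // Stand-in for the namesystem read/write lock.
  private final ReentrantReadWriteLock rwLock = new ReentrantReadWriteLock();
  // Separate monitor so concurrent dumps to the same file do not interleave.
  private final Object dumpLock = new Object();

  void dump(File file) throws IOException {
    rwLock.readLock().lock();       // other readers (RPC handlers) proceed
    try {
      synchronized (dumpLock) {     // one dump writes the file at a time
        try (PrintWriter out = new PrintWriter(new BufferedWriter(
            new OutputStreamWriter(Files.newOutputStream(file.toPath()),
                StandardCharsets.UTF_8)))) {
          out.println("... dump contents ...");
        }
      }
    } finally {
      rwLock.readLock().unlock();
    }
  }
}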
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 4e351c0..9cfa180 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -733,7 +733,7 @@ public class BlockManager implements BlockStatsMXBean {
/** Dump meta data to out. */
public void metaSave(PrintWriter out) {
- assert namesystem.hasWriteLock(); // TODO: block manager read lock and NS write lock
+ assert namesystem.hasReadLock(); // TODO: block manager read lock and NS write lock
final List<DatanodeDescriptor> live = new ArrayList<DatanodeDescriptor>();
final List<DatanodeDescriptor> dead = new ArrayList<DatanodeDescriptor>();
datanodeManager.fetchDatanodes(live, dead, false);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 5c9341f..70b65f3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -593,6 +593,12 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
private String nameNodeHostName = null;
/**
+ * HDFS-14497: Concurrency control when many metaSave requests write
+ * metadata to the same output stream after switching to the read lock.
+ */
+ private Object metaSaveLock = new Object();
+
+ /**
* Notify that loading of this FSDirectory is complete, and
* it is imageLoaded for use
*/
@@ -1769,24 +1775,26 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
String operationName = "metaSave";
checkSuperuserPrivilege(operationName);
checkOperation(OperationCategory.READ);
- writeLock();
+ readLock();
try {
checkOperation(OperationCategory.READ);
- File file = new File(System.getProperty("hadoop.log.dir"), filename);
- PrintWriter out = new PrintWriter(new BufferedWriter(
- new OutputStreamWriter(Files.newOutputStream(file.toPath()),
- Charsets.UTF_8)));
- metaSave(out);
- out.flush();
- out.close();
+ synchronized(metaSaveLock) {
+ File file = new File(System.getProperty("hadoop.log.dir"), filename);
+ PrintWriter out = new PrintWriter(new BufferedWriter(
+ new OutputStreamWriter(Files.newOutputStream(file.toPath()),
+ Charsets.UTF_8)));
+ metaSave(out);
+ out.flush();
+ out.close();
+ }
} finally {
- writeUnlock(operationName);
+ readUnlock(operationName);
}
logAuditEvent(true, operationName, null);
}
private void metaSave(PrintWriter out) {
- assert hasWriteLock();
+ assert hasReadLock();
long totalInodes = this.dir.totalInodes();
long totalBlocks = this.getBlocksTotal();
out.println(totalInodes + " files and directories, " + totalBlocks
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestMetaSave.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestMetaSave.java
index 8cc1433..d4748f3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestMetaSave.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestMetaSave.java
@@ -27,6 +27,7 @@ import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStreamReader;
+import java.util.ArrayList;
import java.util.concurrent.TimeoutException;
import com.google.common.base.Supplier;
@@ -215,6 +216,65 @@ public class TestMetaSave {
}
}
+ class MetaSaveThread extends Thread {
+ NamenodeProtocols nnRpc;
+ String filename;
+ public MetaSaveThread(NamenodeProtocols nnRpc, String filename) {
+ this.nnRpc = nnRpc;
+ this.filename = filename;
+ }
+
+ @Override
+ public void run() {
+ try {
+ nnRpc.metaSave(filename);
+ } catch (IOException e) {
+ }
+ }
+ }
+
+ /**
+ * Tests that concurrent metasave calls overwrite the output file (not append).
+ */
+ @Test
+ public void testConcurrentMetaSave() throws Exception {
+ ArrayList<MetaSaveThread> threads = new ArrayList<>();
+ for (int i = 0; i < 10; i++) {
+ threads.add(new MetaSaveThread(nnRpc, "metaSaveConcurrent.out.txt"));
+ }
+ for (int i = 0; i < 10; i++) {
+ threads.get(i).start();
+ }
+ for (int i = 0; i < 10; i++) {
+ threads.get(i).join();
+ }
+ // Read output file.
+ FileInputStream fis = null;
+ InputStreamReader isr = null;
+ BufferedReader rdr = null;
+ try {
+ fis = new FileInputStream(getLogFile("metaSaveConcurrent.out.txt"));
+ isr = new InputStreamReader(fis);
+ rdr = new BufferedReader(isr);
+
+ // Validate that file was overwritten (not appended) by checking for
+ // presence of only one "Live Datanodes" line.
+ boolean foundLiveDatanodesLine = false;
+ String line = rdr.readLine();
+ while (line != null) {
+ if (line.startsWith("Live Datanodes")) {
+ if (foundLiveDatanodesLine) {
+ fail("multiple Live Datanodes lines, output file not overwritten");
+ }
+ foundLiveDatanodesLine = true;
+ }
+ line = rdr.readLine();
+ }
+ } finally {
+ IOUtils.cleanup(null, rdr, isr, fis);
+ }
+ }
+
@After
public void tearDown() throws IOException {
if (fileSys != null)
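As a usage note, metasave is normally triggered by an administrator with hdfs dfsadmin -metasave <filename>; the dump is written to <filename> under the directory given by hadoop.log.dir, as shown in the FSNamesystem hunk above. With this change, concurrent invocations run under the read lock and serialize on metaSaveLock rather than queueing behind the namesystem write lock.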
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]