Author: cdouglas
Date: Wed Jul 16 14:36:01 2008
New Revision: 677438
URL: http://svn.apache.org/viewvc?rev=677438&view=rev
Log:
HADOOP-3720. Re-read the config file when dfsadmin -refreshNodes is invoked
so dfs.hosts and dfs.hosts.exclude are observed. Contributed by lohit
vijayarenu.
Modified:
hadoop/core/trunk/CHANGES.txt
hadoop/core/trunk/src/core/org/apache/hadoop/util/HostsFileReader.java
hadoop/core/trunk/src/docs/src/documentation/content/xdocs/hdfs_design.xml
hadoop/core/trunk/src/docs/src/documentation/content/xdocs/hdfs_user_guide.xml
hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/NameNode.java
hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/tools/DFSAdmin.java
hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/TestDecommission.java
Modified: hadoop/core/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/CHANGES.txt?rev=677438&r1=677437&r2=677438&view=diff
==============================================================================
--- hadoop/core/trunk/CHANGES.txt (original)
+++ hadoop/core/trunk/CHANGES.txt Wed Jul 16 14:36:01 2008
@@ -120,6 +120,10 @@
HADOOP-3721. Refactor CompositeRecordReader and related mapred.join classes
to make them clearer. (cdouglas)
+ HADOOP-3720. Re-read the config file when dfsadmin -refreshNodes is invoked
+ so dfs.hosts and dfs.hosts.exclude are observed. (lohit vijayarenu via
+ cdouglas)
+
Release 0.18.0 - Unreleased
INCOMPATIBLE CHANGES
Modified: hadoop/core/trunk/src/core/org/apache/hadoop/util/HostsFileReader.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/core/org/apache/hadoop/util/HostsFileReader.java?rev=677438&r1=677437&r2=677438&view=diff
==============================================================================
--- hadoop/core/trunk/src/core/org/apache/hadoop/util/HostsFileReader.java (original)
+++ hadoop/core/trunk/src/core/org/apache/hadoop/util/HostsFileReader.java Wed Jul 16 14:36:01 2008
@@ -63,7 +63,7 @@
}
}
- public void refresh() throws IOException {
+ public synchronized void refresh() throws IOException {
includes.clear();
excludes.clear();
@@ -83,4 +83,18 @@
return excludes;
}
+ public synchronized void setIncludesFile(String includesFile) {
+ this.includesFile = includesFile;
+ }
+
+ public synchronized void setExcludesFile(String excludesFile) {
+ this.excludesFile = excludesFile;
+ }
+
+ public synchronized void updateFileNames(String includesFile,
+ String excludesFile)
+ throws IOException {
+ setIncludesFile(includesFile);
+ setExcludesFile(excludesFile);
+ }
}
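With refresh() and the new setters all synchronized on the reader, the include/exclude file names can be swapped and re-read without a concurrent caller observing a half-updated pair. A minimal usage sketch (the paths are hypothetical; it assumes the existing HostsFileReader(String, String) constructor and the getHosts()/getExcludedHosts() accessors):

import java.io.IOException;
import org.apache.hadoop.util.HostsFileReader;

public class HostsRefreshSketch {
  public static void main(String[] args) throws IOException {
    // File names as they might come from dfs.hosts / dfs.hosts.exclude
    // at startup (hypothetical paths).
    HostsFileReader reader = new HostsFileReader(
        "/etc/hadoop/dfs.hosts", "/etc/hadoop/dfs.hosts.exclude");

    // Point the reader at new files, then re-parse both. Both calls are
    // synchronized, so no caller sees a half-updated name pair.
    reader.updateFileNames("/etc/hadoop/dfs.hosts.new",
                           "/etc/hadoop/dfs.hosts.exclude.new");
    reader.refresh();

    System.out.println("includes: " + reader.getHosts());
    System.out.println("excludes: " + reader.getExcludedHosts());
  }
}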
Modified: hadoop/core/trunk/src/docs/src/documentation/content/xdocs/hdfs_design.xml
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/docs/src/documentation/content/xdocs/hdfs_design.xml?rev=677438&r1=677437&r2=677438&view=diff
==============================================================================
--- hadoop/core/trunk/src/docs/src/documentation/content/xdocs/hdfs_design.xml (original)
+++ hadoop/core/trunk/src/docs/src/documentation/content/xdocs/hdfs_design.xml Wed Jul 16 14:36:01 2008
@@ -313,7 +313,7 @@
<td> Generate a list of DataNodes </td> <td> <code>bin/hadoop dfsadmin -report</code> </td>
</tr>
<tr>
- <td> Decommission DataNode <code>datanodename</code> </td><td> <code>bin/hadoop dfsadmin -decommission datanodename</code> </td>
+ <td> Recommission or decommission DataNode(s) </td><td> <code>bin/hadoop dfsadmin -refreshNodes</code> </td>
</tr>
</table>
</section>
Modified: hadoop/core/trunk/src/docs/src/documentation/content/xdocs/hdfs_user_guide.xml
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/docs/src/documentation/content/xdocs/hdfs_user_guide.xml?rev=677438&r1=677437&r2=677438&view=diff
==============================================================================
--- hadoop/core/trunk/src/docs/src/documentation/content/xdocs/hdfs_user_guide.xml (original)
+++ hadoop/core/trunk/src/docs/src/documentation/content/xdocs/hdfs_user_guide.xml Wed Jul 16 14:36:01 2008
@@ -190,6 +190,17 @@
<code>-finalizeUpgrade</code>
: removes previous backup of the cluster made during last upgrade.
</li>
+ <li>
+ <code>-refreshNodes</code>
+ : Updates the set of hosts allowed to connect to the namenode.
+ Re-reads the config file to update values defined by dfs.hosts and
+ dfs.hosts.exclude and reads the entries (hostnames) in those files.
+ Each entry not defined in dfs.hosts but in dfs.hosts.exclude
+ is decommissioned. Each entry defined in dfs.hosts and also in
+ dfs.hosts.exclude is stopped from decommissioning if it has already
+ been marked for decommission. Entries present in neither list
+ are decommissioned.
+ </li>
</ul>
<p>
For command usage, see <a href="commands_manual.html#dfsadmin">dfsadmin command</a>.
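As a usage note, the same refresh can also be issued programmatically through the Tool interface that bin/hadoop uses to dispatch dfsadmin; a minimal sketch (assuming DFSAdmin's no-argument constructor and the standard ToolRunner entry point):

import org.apache.hadoop.hdfs.tools.DFSAdmin;
import org.apache.hadoop.util.ToolRunner;

public class RefreshNodesSketch {
  public static void main(String[] args) throws Exception {
    // Equivalent of "bin/hadoop dfsadmin -refreshNodes": asks the namenode
    // to re-read dfs.hosts / dfs.hosts.exclude and apply the new lists.
    int exitCode = ToolRunner.run(new DFSAdmin(), new String[] { "-refreshNodes" });
    System.exit(exitCode);
  }
}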
Modified: hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java?rev=677438&r1=677437&r2=677438&view=diff
==============================================================================
--- hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java (original)
+++ hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java Wed Jul 16 14:36:01 2008
@@ -3480,6 +3480,7 @@
}
/**
+ * Rereads the config to get hosts and exclude list file names.
* Rereads the files to update the hosts and exclude lists. It
* checks if any of the hosts have changed states:
* 1. Added to hosts --> no further work needed here.
@@ -3487,8 +3488,14 @@
* 3. Added to exclude --> start decommission.
* 4. Removed from exclude --> stop decommission.
*/
- void refreshNodes() throws IOException {
+ public void refreshNodes(Configuration conf) throws IOException {
checkSuperuserPrivilege();
+ // Reread the config to get dfs.hosts and dfs.hosts.exclude filenames.
+ // Update the file names and refresh internal includes and excludes list
+ if (conf == null)
+ conf = new Configuration();
+ hostsReader.updateFileNames(conf.get("dfs.hosts",""),
+ conf.get("dfs.hosts.exclude", ""));
hostsReader.refresh();
synchronized (this) {
for (Iterator<DatanodeDescriptor> it = datanodeMap.values().iterator();
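A condensed, self-contained sketch of the four transitions listed in the javadoc above (the types and the empty-include-list-admits-all convention are illustrative stand-ins, not the actual FSNamesystem internals):

import java.util.Set;

// Stand-in for DatanodeDescriptor state; illustrative only.
class NodeState {
  String host;
  boolean decommissioned;
  boolean decommissionInProgress;
}

class RefreshNodesLogicSketch {
  // Mirrors cases 1-4 from the javadoc above.
  static void apply(NodeState node, Set<String> hosts, Set<String> excludes) {
    // Assumption: an empty include list admits every node.
    boolean inHosts = hosts.isEmpty() || hosts.contains(node.host);
    boolean inExcludes = excludes.contains(node.host);
    if (!inHosts) {
      node.decommissioned = true;                 // 2. removed from hosts
    } else if (inExcludes) {
      if (!node.decommissioned && !node.decommissionInProgress) {
        node.decommissionInProgress = true;       // 3. added to exclude -> start decommission
      }
    } else if (node.decommissionInProgress) {
      node.decommissionInProgress = false;        // 4. removed from exclude -> stop decommission
    }                                             // 1. added to hosts -> no further work
  }
}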
Modified: hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/NameNode.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/NameNode.java?rev=677438&r1=677437&r2=677438&view=diff
==============================================================================
--- hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/NameNode.java (original)
+++ hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/NameNode.java Wed Jul 16 14:36:01 2008
@@ -521,10 +521,11 @@
/*
* Refresh the list of datanodes that the namenode should allow to
- * connect. Uses the files list in the configuration to update the list.
+ * connect. Re-reads conf by creating a new Configuration object and
+ * uses the files list in the configuration to update the list.
*/
public void refreshNodes() throws IOException {
- namesystem.refreshNodes();
+ namesystem.refreshNodes(new Configuration());
}
/**
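The reason for constructing a fresh Configuration here: a new instance re-loads the default resources (hadoop-default.xml and hadoop-site.xml in this era) from the classpath, so property edits made after the namenode started become visible to refreshNodes. A small illustration (output depends on the local site files):

import org.apache.hadoop.conf.Configuration;

public class FreshConfSketch {
  public static void main(String[] args) {
    // A fresh Configuration re-reads the default resources, picking up any
    // dfs.hosts / dfs.hosts.exclude values edited since the process started.
    Configuration conf = new Configuration();
    System.out.println("dfs.hosts         = " + conf.get("dfs.hosts", ""));
    System.out.println("dfs.hosts.exclude = " + conf.get("dfs.hosts.exclude", ""));
  }
}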
Modified: hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/tools/DFSAdmin.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/tools/DFSAdmin.java?rev=677438&r1=677437&r2=677438&view=diff
==============================================================================
--- hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/tools/DFSAdmin.java (original)
+++ hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/tools/DFSAdmin.java Wed Jul 16 14:36:01 2008
@@ -305,9 +305,16 @@
"\t\tcondition. Safe mode can also be entered manually, but then\n" +
"\t\tit can only be turned off manually as well.\n";
- String refreshNodes = "-refreshNodes: \tRe-read the hosts and exclude files to update the set\n" +
- "\t\tof Datanodes that are allowed to connect to the Namenode\n" +
- "\t\tand those that should be decommissioned of recommissioned.\n";
+ String refreshNodes = "-refreshNodes: \tUpdates the set of hosts allowed " +
+ "to connect to the namenode.\n\n" +
+ "\t\tRe-reads the config file to update values defined by \n" +
+ "\t\tdfs.hosts and dfs.hosts.exclude and reads the \n" +
+ "\t\tentries (hostnames) in those files.\n\n" +
+ "\t\tEach entry not defined in dfs.hosts but in \n" +
+ "\t\tdfs.hosts.exclude is decommissioned. Each entry defined \n" +
+ "\t\tin dfs.hosts and also in dfs.hosts.exclude is stopped from \n" +
+ "\t\tdecommissioning if it has already been marked for decommission.\n" +
+ "\t\tEntries present in neither list are decommissioned.\n";
String finalizeUpgrade = "-finalizeUpgrade: Finalize upgrade of DFS.\n" +
"\t\tDatanodes delete their previous version working directories,\n" +
Modified: hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/TestDecommission.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/TestDecommission.java?rev=677438&r1=677437&r2=677438&view=diff
==============================================================================
--- hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/TestDecommission.java (original)
+++ hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/TestDecommission.java Wed Jul 16 14:36:01 2008
@@ -28,6 +28,7 @@
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
@@ -170,7 +171,9 @@
/*
* decommission one random node.
*/
- private String decommissionNode(DFSClient client,
+ private String decommissionNode(NameNode namenode,
+ Configuration conf,
+ DFSClient client,
FileSystem filesys,
FileSystem localFileSys)
throws IOException {
@@ -195,7 +198,7 @@
ArrayList<String> nodes = new ArrayList<String>(decommissionedNodes);
nodes.add(nodename);
writeConfigFile(localFileSys, excludeFile, nodes);
- dfs.refreshNodes();
+ namenode.namesystem.refreshNodes(conf);
return nodename;
}
@@ -303,7 +306,8 @@
replicas + " replicas.");
checkFile(fileSys, file1, replicas);
printFileLocations(fileSys, file1);
- String downnode = decommissionNode(client, fileSys, localFileSys);
+ String downnode = decommissionNode(cluster.getNameNode(), conf,
+ client, fileSys, localFileSys);
decommissionedNodes.add(downnode);
waitNodeState(fileSys, downnode, NodeState.DECOMMISSIONED);
checkFile(fileSys, file1, replicas, downnode);