Author: omalley
Date: Mon Jul 21 10:50:53 2008
New Revision: 678511
URL: http://svn.apache.org/viewvc?rev=678511&view=rev
Log:
HADOOP-3762. Fixed FileSystem cache to work with the default port.
Contributed by Doug Cutting.
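
For background, the sketch below is illustrative toy code only, not part of this patch (the class and names are invented). It shows the kind of mismatch the fix targets: a cache keyed on a URI's raw authority treats "hdfs://namenode/" and "hdfs://namenode:8020/" as two different filesystems even when 8020 is the NameNode's default port.

import java.net.URI;
import java.util.HashMap;
import java.util.Map;

public class DefaultPortCacheDemo {
  // Toy cache keyed on scheme + raw authority, with no default-port handling.
  private static final Map<String, Object> CACHE = new HashMap<String, Object>();

  static synchronized Object get(URI uri) {
    String key = uri.getScheme() + "://" + uri.getAuthority();
    Object fs = CACHE.get(key);
    if (fs == null) {
      fs = new Object();          // stand-in for an expensive client instance
      CACHE.put(key, fs);
    }
    return fs;
  }

  public static void main(String[] args) {
    Object implicit = get(URI.create("hdfs://namenode/user"));      // no port
    Object explicit = get(URI.create("hdfs://namenode:8020/user")); // explicit default port
    System.out.println("same cached instance? " + (implicit == explicit)); // prints false
  }
}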
Modified:
hadoop/core/trunk/CHANGES.txt
hadoop/core/trunk/src/core/org/apache/hadoop/fs/FileSystem.java
hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/DistributedFileSystem.java
hadoop/core/trunk/src/test/org/apache/hadoop/fs/TestFileSystem.java
Modified: hadoop/core/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/CHANGES.txt?rev=678511&r1=678510&r2=678511&view=diff
==============================================================================
--- hadoop/core/trunk/CHANGES.txt (original)
+++ hadoop/core/trunk/CHANGES.txt Mon Jul 21 10:50:53 2008
@@ -674,12 +674,14 @@
HADOOP-3454. Fix Text::find to search only valid byte ranges. (Chad Whipkey
via cdouglas)
- HADOOP-3417. Removes the static configuration variable, commandLineConfig from
- JobClient. Moves the cli parsing from JobShell to GenericOptionsParser.
- Thus removes the class org.apache.hadoop.mapred.JobShell.
- (Amareshwari Sriramadasu via ddas)
+ HADOOP-3417. Removes the static configuration variable,
+ commandLineConfig from JobClient. Moves the cli parsing from
+ JobShell to GenericOptionsParser. Thus removes the class
+ org.apache.hadoop.mapred.JobShell. (Amareshwari Sriramadasu via
+ ddas)
- HADOOP-2132. Only RUNNING/PREP jobs can be killed. (Jothi Padmanabhan via ddas)
+ HADOOP-2132. Only RUNNING/PREP jobs can be killed. (Jothi Padmanabhan
+ via ddas)
HADOOP-3476. Code cleanup in fuse-dfs.
(Peter Wyckoff via dhruba)
@@ -709,8 +711,9 @@
HADOOP-3135. Get the system directory from the JobTracker instead of from
the conf. (Subramaniam Krishnan via ddas)
- HADOOP-3503. Fix a race condition when client and namenode start simultaneous
- recovery of the same block. (dhruba & Tsz Wo (Nicholas), SZE)
+ HADOOP-3503. Fix a race condition when client and namenode start
+ simultaneous recovery of the same block. (dhruba & Tsz Wo
+ (Nicholas), SZE)
HADOOP-3440. Fixes DistributedCache to not create symlinks for paths which
don't have fragments even when createSymLink is true.
@@ -720,7 +723,8 @@
HADOOP-3489. Fix NPE in SafeModeMonitor. (Lohit Vijayarenu via shv)
- HADOOP-3509. Fix NPE in FSNamesystem.close. (Tsz Wo (Nicholas), SZE via shv)
+ HADOOP-3509. Fix NPE in FSNamesystem.close. (Tsz Wo (Nicholas), SZE via
+ shv)
HADOOP-3491. Name-node shutdown causes InterruptedException in
ResolutionMonitor. (Lohit Vijayarenu via shv)
@@ -881,6 +885,9 @@
HADOOP-3774. Fix typos in shell output. (Tsz Wo (Nicholas), SZE via
cdouglas)
+ HADOOP-3762. Fixed FileSystem cache to work with the default port. (cutting
+ via omalley)
+
Release 0.17.2 - Unreleased
BUG FIXES
Modified: hadoop/core/trunk/src/core/org/apache/hadoop/fs/FileSystem.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/core/org/apache/hadoop/fs/FileSystem.java?rev=678511&r1=678510&r2=678511&view=diff
==============================================================================
--- hadoop/core/trunk/src/core/org/apache/hadoop/fs/FileSystem.java (original)
+++ hadoop/core/trunk/src/core/org/apache/hadoop/fs/FileSystem.java Mon Jul 21 10:50:53 2008
@@ -60,6 +60,10 @@
/** FileSystem cache */
private static final Cache CACHE = new Cache();
+
+ /** The key this instance is stored under in the cache. */
+ private Cache.Key key;
+
/** Recording statistics per a FileSystem class */
private static final Map<Class<? extends FileSystem>, Statistics>
statisticsTable =
@@ -312,9 +316,9 @@
thisAuthority.equalsIgnoreCase(defaultUri.getAuthority()))
return;
}
- throw new IllegalArgumentException("Wrong FS: "+path+
- ", expected: "+this.getUri());
}
+ throw new IllegalArgumentException("Wrong FS: "+path+
+ ", expected: "+this.getUri());
}
/**
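
The hunk above moves the IllegalArgumentException out one block level, so the rejection runs only after every acceptance check has had a chance to return. A generic sketch of that accept-early, reject-at-the-end pattern (simplified; this is not the actual FileSystem.checkPath code):

import java.util.Arrays;
import java.util.List;

public class AcceptThenRejectSketch {
  // Return as soon as any acceptable authority matches; throw only after
  // all checks have been exhausted, never from inside a fallback branch.
  static void check(String authority, List<String> accepted) {
    for (String candidate : accepted) {
      if (candidate.equalsIgnoreCase(authority)) {
        return;                                   // acceptable authority
      }
    }
    throw new IllegalArgumentException("Wrong FS: " + authority
        + ", expected one of: " + accepted);
  }

  public static void main(String[] args) {
    check("namenode:8020", Arrays.asList("NAMENODE:8020", "backup:9000")); // passes
    check("elsewhere:7000", Arrays.asList("namenode:8020"));               // throws
  }
}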
@@ -1237,7 +1241,7 @@
public void close() throws IOException {
// delete all files that were marked as delete-on-exit.
processDeleteOnExit();
- CACHE.remove(new Cache.Key(this), this);
+ CACHE.remove(this.key, this);
}
/** Return the total size of all files in the filesystem.*/
@@ -1341,13 +1345,15 @@
private final Map<Key, FileSystem> map = new HashMap<Key, FileSystem>();
synchronized FileSystem get(URI uri, Configuration conf) throws IOException{
- FileSystem fs = map.get(new Key(uri, conf));
+ Key key = new Key(uri, conf);
+ FileSystem fs = map.get(key);
if (fs == null) {
fs = createFileSystem(uri, conf);
if (map.isEmpty() && !clientFinalizer.isAlive()) {
Runtime.getRuntime().addShutdownHook(clientFinalizer);
}
- map.put(new Key(fs), fs);
+ fs.key = key;
+ map.put(key, fs);
}
return fs;
}
@@ -1395,10 +1401,6 @@
final String authority;
final String username;
- Key(FileSystem fs) throws IOException {
- this(fs.getUri(), fs.getConf());
- }
-
Key(URI uri, Configuration conf) throws IOException {
scheme = uri.getScheme();
authority = uri.getAuthority();
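
In the cache changes above, get() now records on each FileSystem instance the exact Key it was stored under, and close() removes that remembered key instead of rebuilding one. A simplified sketch of this store-the-key pattern (illustrative only; the names below are not Hadoop's):

import java.util.HashMap;
import java.util.Map;

public class KeyedCacheSketch {
  static class Entry {
    String key;                          // the exact key this entry was stored under
  }

  private final Map<String, Entry> map = new HashMap<String, Entry>();

  synchronized Entry get(String key) {
    Entry e = map.get(key);
    if (e == null) {
      e = new Entry();
      e.key = key;                       // remember the insertion key on the instance
      map.put(key, e);
    }
    return e;
  }

  // close()-style removal: use the remembered key instead of recomputing one.
  synchronized void remove(Entry e) {
    if (map.get(e.key) == e) {
      map.remove(e.key);
    }
  }

  public static void main(String[] args) {
    KeyedCacheSketch cache = new KeyedCacheSketch();
    Entry a = cache.get("hdfs://namenode");
    cache.remove(a);                     // removes exactly the entry stored at get() time
  }
}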
Modified: hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/DistributedFileSystem.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/DistributedFileSystem.java?rev=678511&r1=678510&r2=678511&view=diff
==============================================================================
--- hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/DistributedFileSystem.java (original)
+++ hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/DistributedFileSystem.java Mon Jul 21 10:50:53 2008
@@ -77,6 +77,21 @@
this.workingDir = getHomeDirectory();
}
+ /** Permit paths which explicitly specify the default port. */
+ protected void checkPath(Path path) {
+ URI thisUri = this.getUri();
+ URI thatUri = path.toUri();
+ String thatAuthority = thatUri.getAuthority();
+ if (thatUri.getScheme() != null
+ && thatUri.getScheme().equalsIgnoreCase(thisUri.getScheme())
+ && thatUri.getPort() == NameNode.DEFAULT_PORT
+ && thisUri.getPort() == -1
+ && thatAuthority.substring(0,thatAuthority.indexOf(":"))
+ .equalsIgnoreCase(thisUri.getAuthority()))
+ return;
+ super.checkPath(path);
+ }
+
public Path getWorkingDirectory() {
return workingDir;
}
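
The checkPath override added above accepts a path that spells out the default NameNode port when the filesystem's own URI omits the port. A standalone sketch of that acceptance rule (illustrative; it hard-codes 8020, which is assumed here to be the value of NameNode.DEFAULT_PORT):

import java.net.URI;

public class DefaultPortCheckSketch {
  static final int DEFAULT_PORT = 8020;   // assumed default NameNode port

  // True when the path names the same host with the default port spelled out
  // while the filesystem URI carries no port at all.
  static boolean matchesWithDefaultPort(URI fsUri, URI pathUri) {
    String authority = pathUri.getAuthority();
    return pathUri.getScheme() != null
        && pathUri.getScheme().equalsIgnoreCase(fsUri.getScheme())
        && pathUri.getPort() == DEFAULT_PORT
        && fsUri.getPort() == -1
        && authority != null && authority.indexOf(':') >= 0
        && authority.substring(0, authority.indexOf(':'))
            .equalsIgnoreCase(fsUri.getAuthority());
  }

  public static void main(String[] args) {
    URI fsUri = URI.create("hdfs://namenode");
    System.out.println(matchesWithDefaultPort(fsUri, URI.create("hdfs://namenode:8020/user"))); // true
    System.out.println(matchesWithDefaultPort(fsUri, URI.create("hdfs://namenode:9000/user"))); // false
  }
}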
Modified: hadoop/core/trunk/src/test/org/apache/hadoop/fs/TestFileSystem.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/test/org/apache/hadoop/fs/TestFileSystem.java?rev=678511&r1=678510&r2=678511&view=diff
==============================================================================
--- hadoop/core/trunk/src/test/org/apache/hadoop/fs/TestFileSystem.java (original)
+++ hadoop/core/trunk/src/test/org/apache/hadoop/fs/TestFileSystem.java Mon Jul 21 10:50:53 2008
@@ -32,6 +32,7 @@
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.fs.shell.CommandFormat;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.SequenceFile;
@@ -506,22 +507,51 @@
}
{
- MiniDFSCluster cluster = null;
try {
- cluster = new MiniDFSCluster(new Configuration(), 2, true, null);
- URI uri = cluster.getFileSystem().getUri();
+ runTestCache(NameNode.DEFAULT_PORT);
+ } catch(java.net.BindException be) {
+ LOG.warn("Cannot test NameNode.DEFAULT_PORT (="
+ + NameNode.DEFAULT_PORT + ")", be);
+ }
+
+ runTestCache(0);
+ }
+ }
+
+ static void runTestCache(int port) throws Exception {
+ Configuration conf = new Configuration();
+ MiniDFSCluster cluster = null;
+ try {
+ cluster = new MiniDFSCluster(port, conf, 2, true, true, null, null);
+ URI uri = cluster.getFileSystem().getUri();
+ LOG.info("uri=" + uri);
+
+ {
FileSystem fs = FileSystem.get(uri, new Configuration());
checkPath(cluster, fs);
for(int i = 0; i < 100; i++) {
assertTrue(fs == FileSystem.get(uri, new Configuration()));
}
- } finally {
- cluster.shutdown();
}
+
+ if (port == NameNode.DEFAULT_PORT) {
+ //test explicit default port
+ URI uri2 = new URI(uri.getScheme(), uri.getUserInfo(),
+ uri.getHost(), NameNode.DEFAULT_PORT, uri.getPath(),
+ uri.getQuery(), uri.getFragment());
+ LOG.info("uri2=" + uri2);
+ FileSystem fs = FileSystem.get(uri2, conf);
+ checkPath(cluster, fs);
+ for(int i = 0; i < 100; i++) {
+ assertTrue(fs == FileSystem.get(uri2, new Configuration()));
+ }
+ }
+ } finally {
+ if (cluster != null) cluster.shutdown();
}
}
- private void checkPath(MiniDFSCluster cluster, FileSystem fileSys) throws IOException {
+ static void checkPath(MiniDFSCluster cluster, FileSystem fileSys) throws IOException {
InetSocketAddress add = cluster.getNameNode().getNameNodeAddress();
// Test upper/lower case
fileSys.checkPath(new Path("hdfs://" + add.getHostName().toUpperCase() +
":" + add.getPort()));