Author: ddas
Date: Mon Feb 23 08:42:34 2009
New Revision: 746925
URL: http://svn.apache.org/viewvc?rev=746925&view=rev
Log:
HADOOP-5103. FileInputFormat now reuses the clusterMap network topology object
across calls to getSplitHosts, which significantly reduces the number of
NetworkTopology.add log messages printed by the JobClient. Contributed by
Jothi Padmanabhan.
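The fix follows a simple hoisting pattern: rather than building a fresh
NetworkTopology inside getSplitHosts for every split, getSplits constructs one
topology up front and threads it through each call, so each distinct host is
added to the topology (and logged) at most once per job. A minimal sketch of
that pattern, using made-up topology paths in place of real block locations:

    import org.apache.hadoop.net.NetworkTopology;
    import org.apache.hadoop.net.Node;
    import org.apache.hadoop.net.NodeBase;

    public class TopologyReuseSketch {
      public static void main(String[] args) {
        // One shared topology for the whole job, as the patch does in
        // FileInputFormat.getSplits(); the paths below are hypothetical.
        NetworkTopology clusterMap = new NetworkTopology();
        String[] topoPaths = {"/rack1/host1", "/rack1/host1", "/rack2/host2"};
        for (String topo : topoPaths) {
          // Look up first, add only if unseen -- a repeated host from a
          // later split reuses the Node instance already in the topology.
          Node node = clusterMap.getNode(topo);
          if (node == null) {
            clusterMap.add(new NodeBase(topo));
          }
        }
        System.out.println("racks in topology: " + clusterMap.getNumOfRacks());
      }
    }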
Modified:
hadoop/core/trunk/CHANGES.txt
hadoop/core/trunk/src/core/org/apache/hadoop/net/NetworkTopology.java
hadoop/core/trunk/src/mapred/org/apache/hadoop/mapred/FileInputFormat.java
hadoop/core/trunk/src/test/org/apache/hadoop/mapred/TestGetSplitHosts.java
Modified: hadoop/core/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/CHANGES.txt?rev=746925&r1=746924&r2=746925&view=diff
==============================================================================
--- hadoop/core/trunk/CHANGES.txt (original)
+++ hadoop/core/trunk/CHANGES.txt Mon Feb 23 08:42:34 2009
@@ -208,6 +208,10 @@
HADOOP-5292. Fix NPE in KFS::getBlockLocations. (Sriram Rao via lohit)
+    HADOOP-5103. FileInputFormat now reuses the clusterMap network topology object
+    and that brings down the log messages in the JobClient to do with
+    NetworkTopology.add significantly. (Jothi Padmanabhan via ddas)
+
Release 0.20.0 - Unreleased
INCOMPATIBLE CHANGES
Modified: hadoop/core/trunk/src/core/org/apache/hadoop/net/NetworkTopology.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/core/org/apache/hadoop/net/NetworkTopology.java?rev=746925&r1=746924&r2=746925&view=diff
==============================================================================
--- hadoop/core/trunk/src/core/org/apache/hadoop/net/NetworkTopology.java (original)
+++ hadoop/core/trunk/src/core/org/apache/hadoop/net/NetworkTopology.java Mon Feb 23 08:42:34 2009
@@ -316,7 +316,6 @@
throw new IllegalArgumentException(
"Not allow to add an inner node: "+NodeBase.getPath(node));
}
- LOG.info("Adding a new node: "+NodeBase.getPath(node));
netlock.writeLock().lock();
try {
Node rack = getNode(node.getNetworkLocation());
@@ -326,6 +325,7 @@
+ " at an illegal network
location");
}
if (clusterMap.add(node)) {
+ LOG.info("Adding a new node: "+NodeBase.getPath(node));
if (rack == null) {
numOfRacks++;
}
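The NetworkTopology.java hunk above moves the LOG.info call from the top of
add() to inside the clusterMap.add(node) check; as the if-condition implies,
the inner InnerNode.add returns false when the node is already present, so
re-adding a known host no longer produces a log line. A rough analogy using
java.util.Set, whose add() has the same added-or-duplicate contract (this is
an illustration, not the Hadoop code):

    import java.util.HashSet;
    import java.util.Set;

    public class LogOnRealInsertSketch {
      public static void main(String[] args) {
        Set<String> cluster = new HashSet<String>();
        String[] adds = {"/rack1/host1", "/rack1/host1", "/rack1/host1"};
        for (String path : adds) {
          // Set.add() returns false for a duplicate, so the message below
          // prints once instead of three times -- the same effect the
          // patch achieves for NetworkTopology.add.
          if (cluster.add(path)) {
            System.out.println("Adding a new node: " + path);
          }
        }
      }
    }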
Modified: hadoop/core/trunk/src/mapred/org/apache/hadoop/mapred/FileInputFormat.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/mapred/org/apache/hadoop/mapred/FileInputFormat.java?rev=746925&r1=746924&r2=746925&view=diff
==============================================================================
--- hadoop/core/trunk/src/mapred/org/apache/hadoop/mapred/FileInputFormat.java (original)
+++ hadoop/core/trunk/src/mapred/org/apache/hadoop/mapred/FileInputFormat.java Mon Feb 23 08:42:34 2009
@@ -214,6 +214,7 @@
// generate splits
ArrayList<FileSplit> splits = new ArrayList<FileSplit>(numSplits);
+ NetworkTopology clusterMap = new NetworkTopology();
for (FileStatus file: files) {
Path path = file.getPath();
FileSystem fs = path.getFileSystem(job);
@@ -226,7 +227,7 @@
long bytesRemaining = length;
while (((double) bytesRemaining)/splitSize > SPLIT_SLOP) {
String[] splitHosts = getSplitHosts(blkLocations,
- length-bytesRemaining, splitSize);
+ length-bytesRemaining, splitSize, clusterMap);
splits.add(new FileSplit(path, length-bytesRemaining, splitSize,
splitHosts));
bytesRemaining -= splitSize;
@@ -237,7 +238,7 @@
blkLocations[blkLocations.length-1].getHosts()));
}
} else if (length != 0) {
- String[] splitHosts = getSplitHosts(blkLocations,0,length);
+ String[] splitHosts = getSplitHosts(blkLocations,0,length,clusterMap);
splits.add(new FileSplit(path, 0, length, splitHosts));
} else {
//Create empty hosts array for zero length files
@@ -417,7 +418,8 @@
* @throws IOException
*/
protected String[] getSplitHosts(BlockLocation[] blkLocations,
- long offset, long splitSize) throws IOException {
+ long offset, long splitSize, NetworkTopology clusterMap)
+ throws IOException {
int startIndex = getBlockIndex(blkLocations, offset);
@@ -442,7 +444,6 @@
long bytesInLastBlock = bytesInThisBlock;
int endIndex = index - 1;
- NetworkTopology clusterMap = new NetworkTopology();
Map <Node,NodeInfo> hostsMap = new IdentityHashMap<Node,NodeInfo>();
Map <Node,NodeInfo> racksMap = new IdentityHashMap<Node,NodeInfo>();
String [] allTopos = new String[0];
@@ -486,6 +487,11 @@
if (node == null) {
node = new NodeBase(topo);
clusterMap.add(node);
+ }
+
+ nodeInfo = hostsMap.get(node);
+
+ if (nodeInfo == null) {
nodeInfo = new NodeInfo(node);
hostsMap.put(node,nodeInfo);
parentNode = node.getParent();
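The last FileInputFormat.java hunk exists because the topology now outlives a
single getSplitHosts call: a node returned by clusterMap.getNode may have been
added by an earlier call, while this call's per-invocation hostsMap has no
entry for it yet, so the NodeInfo lookup had to be decoupled from the
was-the-node-new check. A sketch of that two-map pattern with simplified
stand-in types (Node and NodeInfo below are not Hadoop's classes):

    import java.util.HashMap;
    import java.util.IdentityHashMap;
    import java.util.Map;

    public class DecoupledLookupSketch {
      // Simplified stand-ins for Hadoop's Node and NodeInfo.
      static class Node { final String path; Node(String p) { path = p; } }
      static class NodeInfo { long bytes; }

      // Survives across calls, like the reused NetworkTopology.
      static final Map<String, Node> cluster = new HashMap<String, Node>();

      static NodeInfo lookUp(Map<Node, NodeInfo> hostsMap, String topo) {
        Node node = cluster.get(topo);   // may exist from a previous call
        if (node == null) {
          node = new Node(topo);
          cluster.put(topo, node);
        }
        // Decoupled second lookup, mirroring the hunk: the node can be in
        // the shared cluster map while this call's hostsMap lacks it.
        NodeInfo nodeInfo = hostsMap.get(node);
        if (nodeInfo == null) {
          nodeInfo = new NodeInfo();
          hostsMap.put(node, nodeInfo);
        }
        return nodeInfo;
      }

      public static void main(String[] args) {
        Map<Node, NodeInfo> call1 = new IdentityHashMap<Node, NodeInfo>();
        lookUp(call1, "/rack1/host1");
        Map<Node, NodeInfo> call2 = new IdentityHashMap<Node, NodeInfo>();
        lookUp(call2, "/rack1/host1");   // reused Node, fresh NodeInfo
        System.out.println("distinct nodes interned: " + cluster.size());
      }
    }

Keying hostsMap by identity is safe here precisely because the shared map
interns node objects, so the same host always resolves to the same instance.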
Modified: hadoop/core/trunk/src/test/org/apache/hadoop/mapred/TestGetSplitHosts.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/test/org/apache/hadoop/mapred/TestGetSplitHosts.java?rev=746925&r1=746924&r2=746925&view=diff
==============================================================================
--- hadoop/core/trunk/src/test/org/apache/hadoop/mapred/TestGetSplitHosts.java (original)
+++ hadoop/core/trunk/src/test/org/apache/hadoop/mapred/TestGetSplitHosts.java Mon Feb 23 08:42:34 2009
@@ -18,6 +18,8 @@
package org.apache.hadoop.mapred;
import org.apache.hadoop.fs.BlockLocation;
+import org.apache.hadoop.net.NetworkTopology;
+
import junit.framework.TestCase;
public class TestGetSplitHosts extends TestCase {
@@ -28,6 +30,7 @@
int block1Size = 100, block2Size = 150, block3Size = 75;
int fileSize = block1Size + block2Size + block3Size;
int replicationFactor = 3;
+ NetworkTopology clusterMap = new NetworkTopology();
BlockLocation[] bs = new BlockLocation[numBlocks];
@@ -72,7 +75,7 @@
SequenceFileInputFormat< String, String> sif =
new SequenceFileInputFormat<String,String>();
- String [] hosts = sif.getSplitHosts(bs, 0, fileSize);
+ String [] hosts = sif.getSplitHosts(bs, 0, fileSize, clusterMap);
// Contributions By Racks are
// Rack1 175
@@ -93,7 +96,7 @@
bs[2] = new BlockLocation(block3Names,block3Hosts,block1Size+block2Size,
block3Size);
- hosts = sif.getSplitHosts(bs, 0, fileSize);
+ hosts = sif.getSplitHosts(bs, 0, fileSize, clusterMap);
// host1 makes the highest contribution among all hosts
// So, that should be returned before others