Author: harsh
Date: Tue Oct 9 13:43:51 2012
New Revision: 1396035
URL: http://svn.apache.org/viewvc?rev=1396035&view=rev
Log:
MAPREDUCE-3678. The Map task's logs should include the value of the input
split it processed. Contributed by Harsh J. (harsh)
Modified:
hadoop/common/branches/branch-1/CHANGES.txt
hadoop/common/branches/branch-1/src/mapred/org/apache/hadoop/mapred/MapTask.java
Modified: hadoop/common/branches/branch-1/CHANGES.txt
URL:
http://svn.apache.org/viewvc/hadoop/common/branches/branch-1/CHANGES.txt?rev=1396035&r1=1396034&r2=1396035&view=diff
==============================================================================
--- hadoop/common/branches/branch-1/CHANGES.txt (original)
+++ hadoop/common/branches/branch-1/CHANGES.txt Tue Oct 9 13:43:51 2012
@@ -84,6 +84,9 @@ Release 1.2.0 - unreleased
MAPREDUCE-4556. FairScheduler: PoolSchedulable#updateDemand() has
potential
redundant computation (kkambatl via tucu)
+ MAPREDUCE-3678. The Map tasks logs should have the value of input
+ split it processed. (harsh)
+
OPTIMIZATIONS
HDFS-2533. Backport: Remove needless synchronization on some FSDataSet
Modified:
hadoop/common/branches/branch-1/src/mapred/org/apache/hadoop/mapred/MapTask.java
URL:
http://svn.apache.org/viewvc/hadoop/common/branches/branch-1/src/mapred/org/apache/hadoop/mapred/MapTask.java?rev=1396035&r1=1396034&r2=1396035&view=diff
==============================================================================
---
hadoop/common/branches/branch-1/src/mapred/org/apache/hadoop/mapred/MapTask.java
(original)
+++
hadoop/common/branches/branch-1/src/mapred/org/apache/hadoop/mapred/MapTask.java
Tue Oct 9 13:43:51 2012
@@ -454,6 +454,7 @@ class MapTask extends Task {
job.setLong("map.input.start", fileSplit.getStart());
job.setLong("map.input.length", fileSplit.getLength());
}
+ LOG.info("Processing split: " + inputSplit);
}
static class NewTrackingRecordReader<K,V>
@@ -727,6 +728,7 @@ class MapTask extends Task {
org.apache.hadoop.mapreduce.InputSplit split = null;
split = getSplitDetails(new Path(splitIndex.getSplitLocation()),
splitIndex.getStartOffset());
+ LOG.info("Processing split: " + split);
org.apache.hadoop.mapreduce.RecordReader<INKEY,INVALUE> input =
new NewTrackingRecordReader<INKEY,INVALUE>