Author: omalley
Date: Mon Jun 30 09:32:58 2008
New Revision: 672808
URL: http://svn.apache.org/viewvc?rev=672808&view=rev
Log:
HADOOP-3667. Remove the following deprecated methods from JobConf:
addInputPath(Path)
getInputPaths()
getMapOutputCompressionType()
getOutputPath()
getSystemDir()
setInputPath(Path)
setMapOutputCompressionType(CompressionType style)
setOutputPath(Path)
Contributed by Amareshwari Sriramadasu.
Modified:
hadoop/core/trunk/CHANGES.txt
hadoop/core/trunk/src/core/org/apache/hadoop/io/SequenceFile.java
hadoop/core/trunk/src/mapred/org/apache/hadoop/mapred/JobConf.java
hadoop/core/trunk/src/test/org/apache/hadoop/mapred/TestSequenceFileAsBinaryInputFormat.java
Modified: hadoop/core/trunk/CHANGES.txt
URL:
http://svn.apache.org/viewvc/hadoop/core/trunk/CHANGES.txt?rev=672808&r1=672807&r2=672808&view=diff
==============================================================================
--- hadoop/core/trunk/CHANGES.txt (original)
+++ hadoop/core/trunk/CHANGES.txt Mon Jun 30 09:32:58 2008
@@ -8,6 +8,17 @@
functionality, which was necessary to providing backwards
compatible combiner semantics for 0.18. (cdouglas via omalley)
+ HADOOP-3667. Remove the following deprecated methods from JobConf:
+ addInputPath(Path)
+ getInputPaths()
+ getMapOutputCompressionType()
+ getOutputPath()
+ getSystemDir()
+ setInputPath(Path)
+ setMapOutputCompressionType(CompressionType style)
+ setOutputPath(Path)
+ (Amareshwari Sriramadasu via omalley)
+
NEW FEATURES
HADOOP-3341. Allow streaming jobs to specify the field separator for map
Modified: hadoop/core/trunk/src/core/org/apache/hadoop/io/SequenceFile.java
URL:
http://svn.apache.org/viewvc/hadoop/core/trunk/src/core/org/apache/hadoop/io/SequenceFile.java?rev=672808&r1=672807&r2=672808&view=diff
==============================================================================
--- hadoop/core/trunk/src/core/org/apache/hadoop/io/SequenceFile.java (original)
+++ hadoop/core/trunk/src/core/org/apache/hadoop/io/SequenceFile.java Mon Jun 30 09:32:58 2008
@@ -221,8 +221,7 @@
* Get the compression type for the reduce outputs
* @param job the job config to look in
* @return the kind of compression to use
- * @deprecated Use {@link org.apache.hadoop.mapred.JobConf#getMapOutputCompressionType()}
- *             to get {@link CompressionType} for intermediate map-outputs or
+ * @deprecated Use
 *             {@link org.apache.hadoop.mapred.SequenceFileOutputFormat#getOutputCompressionType(org.apache.hadoop.mapred.JobConf)}
 *             to get {@link CompressionType} for job-outputs.
*/
@@ -239,8 +238,6 @@
* @param val the new compression type (none, block, record)
 * @deprecated Use the one of the many SequenceFile.createWriter methods to specify
 *             the {@link CompressionType} while creating the {@link SequenceFile} or
- *             {@link org.apache.hadoop.mapred.JobConf#setMapOutputCompressionType(org.apache.hadoop.io.SequenceFile.CompressionType)}
- *             to specify the {@link CompressionType} for intermediate map-outputs or
 *             {@link org.apache.hadoop.mapred.SequenceFileOutputFormat#setOutputCompressionType(org.apache.hadoop.mapred.JobConf, org.apache.hadoop.io.SequenceFile.CompressionType)}
 *             to specify the {@link CompressionType} for job-outputs.
 *             or
Modified: hadoop/core/trunk/src/mapred/org/apache/hadoop/mapred/JobConf.java
URL:
http://svn.apache.org/viewvc/hadoop/core/trunk/src/mapred/org/apache/hadoop/mapred/JobConf.java?rev=672808&r1=672807&r2=672808&view=diff
==============================================================================
--- hadoop/core/trunk/src/mapred/org/apache/hadoop/mapred/JobConf.java (original)
+++ hadoop/core/trunk/src/mapred/org/apache/hadoop/mapred/JobConf.java Mon Jun 30 09:32:58 2008
@@ -21,13 +21,9 @@
import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.Enumeration;
-import java.util.StringTokenizer;
-
import java.net.URL;
import java.net.URLDecoder;
+import java.util.Enumeration;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
@@ -38,7 +34,6 @@
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.*;
-import org.apache.hadoop.io.SequenceFile.CompressionType;
import org.apache.hadoop.io.compress.CompressionCodec;
import org.apache.hadoop.mapred.lib.IdentityMapper;
@@ -185,17 +180,6 @@
}
}
- /**
- * @deprecated Use {@link JobClient#getSystemDir()} instead.
- * Get the system directory where job-specific files are to be placed.
- *
- * @return the system directory where job-specific files are to be placed.
- */
- @Deprecated
- public Path getSystemDir() {
- return new Path(get("mapred.system.dir", "/tmp/hadoop/mapred/system"));
- }
-
public String[] getLocalDirs() throws IOException {
return getStrings("mapred.local.dir");
}
@@ -223,51 +207,6 @@
}
/**
- * Set the {@link Path} of the input directory for the map-reduce job.
- *
- * @param dir the {@link Path} of the input directory for the map-reduce job.
- * @deprecated Use {@link FileInputFormat#setInputPaths(JobConf, Path...)} or
- *             {@link FileInputFormat#setInputPaths(JobConf, String)}
- */
- @Deprecated
- public void setInputPath(Path dir) {
- dir = new Path(getWorkingDirectory(), dir);
- set("mapred.input.dir", dir.toString());
- }
-
- /**
- * Add a {@link Path} to the list of inputs for the map-reduce job.
- *
- * @param dir {@link Path} to be added to the list of inputs for
- *            the map-reduce job.
- * @deprecated Use {@link FileInputFormat#addInputPath(JobConf, Path)} or
- *             {@link FileInputFormat#addInputPaths(JobConf, String)}
- */
- @Deprecated
- public void addInputPath(Path dir) {
- dir = new Path(getWorkingDirectory(), dir);
- String dirs = get("mapred.input.dir");
- set("mapred.input.dir", dirs == null ? dir.toString() : dirs + "," + dir);
- }
-
- /**
- * Get the list of input {@link Path}s for the map-reduce job.
- *
- * @return the list of input {@link Path}s for the map-reduce job.
- * @deprecated Use {@link FileInputFormat#getInputPaths(JobConf)}
- */
- @Deprecated
- public Path[] getInputPaths() {
- String dirs = get("mapred.input.dir", "");
- ArrayList<Object> list = Collections.list(new StringTokenizer(dirs, ","));
- Path[] result = new Path[list.size()];
- for (int i = 0; i < list.size(); i++) {
- result[i] = new Path((String)list.get(i));
- }
- return result;
- }
-
- /**
* Get the reported username for this job.
*
* @return the username
@@ -361,38 +300,6 @@
}
/**
- * @deprecated Use {@link FileOutputFormat#getOutputPath(JobConf)} or
- *             {@link FileOutputFormat#getWorkOutputPath(JobConf)}
- * Get the {@link Path} to the output directory for the map-reduce job.
- *
- * @return the {@link Path} to the output directory for the map-reduce job.
- */
- @Deprecated
- public Path getOutputPath() {
- // this return context sensitive value for output path
- // Returns task's temporary output path while task's execution
- // Otherwise returns the output path that was set.
- Path workOutputDir = FileOutputFormat.getWorkOutputPath(this);
- if (workOutputDir != null) {
- return workOutputDir;
- }
- else return FileOutputFormat.getOutputPath(this);
- }
-
- /**
- * @deprecated Use {@link FileOutputFormat#setOutputPath(JobConf, Path)}
- * Set the {@link Path} of the output directory for the map-reduce job.
- *
- * <p><i>Note</i>:
- * </p>
- * @param dir the {@link Path} of the output directory for the map-reduce job.
- */
- @Deprecated
- public void setOutputPath(Path dir) {
- FileOutputFormat.setOutputPath(this, dir);
- }
-
- /**
 * Get the {@link InputFormat} implementation for the map-reduce job,
 * defaults to {@link TextInputFormat} if not specified explicity.
*
@@ -459,39 +366,6 @@
}
/**
- * Set the {@link CompressionType} for the map outputs.
- *
- * @param style the {@link CompressionType} to control how the map outputs
- *              are compressed.
- * @deprecated {@link CompressionType} is no longer valid for intermediate
- *             map-outputs.
- */
- @Deprecated
- public void setMapOutputCompressionType(CompressionType style) {
- setCompressMapOutput(true);
- set("mapred.map.output.compression.type", style.toString());
- LOG.warn("SequenceFile compression is no longer valid for intermediate " +
- "map-outputs!");
- }
-
- /**
- * Get the {@link CompressionType} for the map outputs.
- *
- * @return the {@link CompressionType} for map outputs, defaulting to
- *         {@link CompressionType#RECORD}.
- * @deprecated {@link CompressionType} is no longer valid for intermediate
- *             map-outputs.
- */
- @Deprecated
- public CompressionType getMapOutputCompressionType() {
- String val = get("mapred.map.output.compression.type",
- CompressionType.RECORD.toString());
- LOG.warn("SequenceFile compression is no longer valid for intermediate " +
- "map-outputs!");
- return CompressionType.valueOf(val);
- }
-
- /**
 * Set the given class as the {@link CompressionCodec} for the map outputs.
 *
 * @param codecClass the {@link CompressionCodec} class that will compress
Modified:
hadoop/core/trunk/src/test/org/apache/hadoop/mapred/TestSequenceFileAsBinaryInputFormat.java
URL:
http://svn.apache.org/viewvc/hadoop/core/trunk/src/test/org/apache/hadoop/mapred/TestSequenceFileAsBinaryInputFormat.java?rev=672808&r1=672807&r2=672808&view=diff
==============================================================================
--- hadoop/core/trunk/src/test/org/apache/hadoop/mapred/TestSequenceFileAsBinaryInputFormat.java (original)
+++ hadoop/core/trunk/src/test/org/apache/hadoop/mapred/TestSequenceFileAsBinaryInputFormat.java Mon Jun 30 09:32:58 2008
@@ -23,7 +23,6 @@
import org.apache.hadoop.fs.*;
import org.apache.hadoop.io.*;
-import org.apache.hadoop.mapred.*;
import junit.framework.TestCase;
import org.apache.commons.logging.*;
@@ -70,7 +69,7 @@
Text cmpval = new Text();
DataInputBuffer buf = new DataInputBuffer();
final int NUM_SPLITS = 3;
- job.setInputPath(file);
+ FileInputFormat.setInputPaths(job, file);
for (InputSplit split : bformat.getSplits(job, NUM_SPLITS)) {
RecordReader<BytesWritable,BytesWritable> reader =
bformat.getRecordReader(split, job, Reporter.NULL);