Author: daijy
Date: Tue Dec 22 01:30:12 2009
New Revision: 893053
URL: http://svn.apache.org/viewvc?rev=893053&view=rev
Log:
PIG-1165: Signature of loader is not set correctly for order by
Modified:
hadoop/pig/trunk/CHANGES.txt
hadoop/pig/trunk/src/org/apache/pig/backend/hadoop/executionengine/mapReduceLayer/SampleOptimizer.java
hadoop/pig/trunk/test/findbugsExcludeFile.xml
hadoop/pig/trunk/test/org/apache/pig/test/TestPruneColumn.java
Modified: hadoop/pig/trunk/CHANGES.txt
URL:
http://svn.apache.org/viewvc/hadoop/pig/trunk/CHANGES.txt?rev=893053&r1=893052&r2=893053&view=diff
==============================================================================
--- hadoop/pig/trunk/CHANGES.txt (original)
+++ hadoop/pig/trunk/CHANGES.txt Tue Dec 22 01:30:12 2009
@@ -315,6 +315,8 @@
PIG-1144: set default_parallelism construct does not set the number of
reducers correctly (daijy)
+PIG-1165: Signature of loader does not set correctly for order by (daijy)
+
Release 0.5.0
INCOMPATIBLE CHANGES
Modified:
hadoop/pig/trunk/src/org/apache/pig/backend/hadoop/executionengine/mapReduceLayer/SampleOptimizer.java
URL:
http://svn.apache.org/viewvc/hadoop/pig/trunk/src/org/apache/pig/backend/hadoop/executionengine/mapReduceLayer/SampleOptimizer.java?rev=893053&r1=893052&r2=893053&view=diff
==============================================================================
---
hadoop/pig/trunk/src/org/apache/pig/backend/hadoop/executionengine/mapReduceLayer/SampleOptimizer.java
(original)
+++
hadoop/pig/trunk/src/org/apache/pig/backend/hadoop/executionengine/mapReduceLayer/SampleOptimizer.java
Tue Dec 22 01:30:12 2009
@@ -180,7 +180,7 @@
rslargs[1] = load.getLFile().getFuncSpec().getCtorArgs()[1];
FileSpec fs = new FileSpec(predFs.getFileName(),new FuncSpec(loadFunc,
rslargs));
POLoad newLoad = new
POLoad(load.getOperatorKey(),load.getRequestedParallelism(), fs,
load.isSplittable());
- newLoad.setSignature(load.getSignature());
+ newLoad.setSignature(predLoad.getSignature());
try {
mr.mapPlan.replace(load, newLoad);
@@ -196,7 +196,7 @@
// Second, replace the loader in our successor with whatever the
originally used loader was.
fs = new FileSpec(predFs.getFileName(), predFs.getFuncSpec());
newLoad = new POLoad(succLoad.getOperatorKey(),
succLoad.getRequestedParallelism(), fs, succLoad.isSplittable());
- newLoad.setSignature(succLoad.getSignature());
+ newLoad.setSignature(predLoad.getSignature());
try {
succ.mapPlan.replace(succLoad, newLoad);
} catch (PlanException e) {
Modified: hadoop/pig/trunk/test/findbugsExcludeFile.xml
URL:
http://svn.apache.org/viewvc/hadoop/pig/trunk/test/findbugsExcludeFile.xml?rev=893053&r1=893052&r2=893053&view=diff
==============================================================================
--- hadoop/pig/trunk/test/findbugsExcludeFile.xml (original)
+++ hadoop/pig/trunk/test/findbugsExcludeFile.xml Tue Dec 22 01:30:12 2009
@@ -324,5 +324,9 @@
<Field name = "res" />
<Bug pattern="MF_CLASS_MASKS_FIELD" />
</Match>
-
+ <Match>
+ <Class
name="org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.MapReduceLauncher"
/>
+ <Method name = "launchPig" />
+ <Bug pattern="DE_MIGHT_IGNORE" />
+ </Match>
</FindBugsFilter>
Modified: hadoop/pig/trunk/test/org/apache/pig/test/TestPruneColumn.java
URL:
http://svn.apache.org/viewvc/hadoop/pig/trunk/test/org/apache/pig/test/TestPruneColumn.java?rev=893053&r1=893052&r2=893053&view=diff
==============================================================================
--- hadoop/pig/trunk/test/org/apache/pig/test/TestPruneColumn.java (original)
+++ hadoop/pig/trunk/test/org/apache/pig/test/TestPruneColumn.java Tue Dec 22
01:30:12 2009
@@ -1656,4 +1656,25 @@
"No map keys pruned for C"}));
}
+ // See PIG-1165
+ @Test
+ public void testOrderbyWrongSignature() throws Exception {
+ pigServer.registerQuery("A = load '"+
Util.generateURI(tmpFile1.toString()) + "' AS (a0, a1, a2);");
+ pigServer.registerQuery("B = load '"+
Util.generateURI(tmpFile2.toString()) + "' AS (b0, b1);");
+ pigServer.registerQuery("C = order A by a1;");
+ pigServer.registerQuery("D = join C by a1, B by b0;");
+ pigServer.registerQuery("E = foreach D generate a1, b0, b1;");
+ Iterator<Tuple> iter = pigServer.openIterator("E");
+
+ assertTrue(iter.hasNext());
+ Tuple t = iter.next();
+
+ assertTrue(t.size()==3);
+ assertTrue(t.toString().equals("(2,2,2)"));
+
+ assertFalse(iter.hasNext());
+
+ assertTrue(checkLogFileMessage(new String[]{"Columns pruned for A: $0,
$2",
+ "No map keys pruned for A", "No column pruned for B", "No map keys
pruned for B"}));
+ }
}