Author: pradeepkth
Date: Sat Jun 13 05:21:43 2009
New Revision: 784333
URL: http://svn.apache.org/viewvc?rev=784333&view=rev
Log:
MultiQuery optimization in some cases has an issue when there is a split in the
map plan (pradeepkth)
Modified:
hadoop/pig/trunk/CHANGES.txt
hadoop/pig/trunk/src/org/apache/pig/backend/hadoop/executionengine/mapReduceLayer/MultiQueryOptimizer.java
hadoop/pig/trunk/test/org/apache/pig/test/TestEvalPipeline2.java
hadoop/pig/trunk/test/org/apache/pig/test/TestKeyTypeDiscoveryVisitor.java
hadoop/pig/trunk/test/org/apache/pig/test/TestMultiQuery.java
hadoop/pig/trunk/test/org/apache/pig/test/TestRelationToExprProject.java
hadoop/pig/trunk/test/org/apache/pig/test/TestUnion.java
hadoop/pig/trunk/test/org/apache/pig/test/Util.java
Modified: hadoop/pig/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/pig/trunk/CHANGES.txt?rev=784333&r1=784332&r2=784333&view=diff
==============================================================================
--- hadoop/pig/trunk/CHANGES.txt (original)
+++ hadoop/pig/trunk/CHANGES.txt Sat Jun 13 05:21:43 2009
@@ -77,6 +77,9 @@
BUG FIXES
+PIG-846: MultiQuery optimization in some cases has an issue when there is a
+split in the map plan (pradeepkth)
+
PIG-835: Multiquery optimization does not handle the case where the map keys
in the split plans have different key types (tuple and non tuple key type)
(pradeepkth)
Modified: hadoop/pig/trunk/src/org/apache/pig/backend/hadoop/executionengine/mapReduceLayer/MultiQueryOptimizer.java
URL: http://svn.apache.org/viewvc/hadoop/pig/trunk/src/org/apache/pig/backend/hadoop/executionengine/mapReduceLayer/MultiQueryOptimizer.java?rev=784333&r1=784332&r2=784333&view=diff
==============================================================================
--- hadoop/pig/trunk/src/org/apache/pig/backend/hadoop/executionengine/mapReduceLayer/MultiQueryOptimizer.java (original)
+++ hadoop/pig/trunk/src/org/apache/pig/backend/hadoop/executionengine/mapReduceLayer/MultiQueryOptimizer.java Sat Jun 13 05:21:43 2009
@@ -20,6 +20,7 @@
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
+import java.util.Set;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
@@ -502,18 +503,27 @@
POPackage pk = (POPackage)from.getRoots().get(0);
from.remove(pk);
- // XXX the index of the original keyInfo map is always 0,
- // we need to shift the index so that the lookups works
- // with the new indexed key
- Map<Integer, Pair<Boolean, Map<Integer, Integer>>> keyInfo = pk.getKeyInfo();
- if (keyInfo != null && keyInfo.size() > 0) {
- byte b = (byte)(initial | PigNullableWritable.mqFlag);
- keyInfo.put(new Integer(b), keyInfo.get(0));
- }
+ if(!(pk instanceof POMultiQueryPackage)){
+ // XXX the index of the original keyInfo map is always 0,
+ // we need to shift the index so that the lookups work
+ // with the new indexed key
+ addShiftedKeyInfoIndex(initial, pk);
+ }
if (pk instanceof POMultiQueryPackage) {
POMultiQueryPackage mpkg = (POMultiQueryPackage)pk;
setBaseIndexOnPackage(initial, mpkg);
+ // we should update the keyinfo map of the
+ // POPackage objects in the POMultiQueryPackage to
+ // have the shifted index - The index now will be
+ // starting from "initial" going up to "current"
+ // ORed with the multi query bit mask
+ int retIndex = addShiftedKeyInfoIndex(initial, current, mpkg);
+ if(retIndex != current) {
+ int errCode = 2146;
+ String msg = "Internal Error. Inconsistency in key index found during optimization.";
+ throw new OptimizerException(msg, errCode, PigException.BUG);
+ }
}
PhysicalOperator root = from.getRoots().get(0);
@@ -539,6 +549,86 @@
}
}
+ private void addShiftedKeyInfoIndex(int index, POPackage pkg) throws OptimizerException {
+ /**
+ * we only do multi query optimization for single input MROpers.
+ * Hence originally the keyInfo would have had only index 0. As
+ * we merge MROpers into parent MROpers we add entries for the
+ * multiquery based index (ORed with the multi query bit mask). These
+ * additions would mean we have many entries in the keyInfo while really
+ * it should only have one, since there is only one input that the package
+ * would be processing and hence only one index. So each time we add an entry
+ * for a new shifted index, we should clean up keyInfo so that it has only
+ * one entry - the valid entry at that point. The "value" in the keyInfo map
+ * for the new addition should be the same as the "value" in the existing
+ * entry. After the addition, we should remove the older entry.
+ */
+ Map<Integer, Pair<Boolean, Map<Integer, Integer>>> keyInfo = pkg.getKeyInfo();
+ byte newIndex = (byte)(index | PigNullableWritable.mqFlag);
+
+ Set<Integer> existingIndices = keyInfo.keySet();
+ if(existingIndices.size() != 1) {
+ // we always maintain one entry in the keyInfo
+ // which is the valid entry at the given stage of
+ // multi query optimization
+ int errCode = 2146;
+ String msg = "Internal Error. Inconsistency in key index found during optimization.";
+ throw new OptimizerException(msg, errCode, PigException.BUG);
+ }
+ int existingIndex = existingIndices.iterator().next();
+ keyInfo.put(new Integer(newIndex), keyInfo.get(existingIndex));
+
+ // clean up the old entry so we only keep
+ // the valid entry around - if we did something wrong while
+ // setting this up, we will fail at runtime, which is better
+ // than doing something wrong and giving incorrect results!
+ if(newIndex != existingIndex) {
+ keyInfo.remove(existingIndex);
+ }
+ }
+
+ /**
+ * @param initialIndex
+ * @param onePastEndIndex
+ * @param mpkg
+ * @throws OptimizerException
+ */
+ private int addShiftedKeyInfoIndex(int initialIndex, int onePastEndIndex, POMultiQueryPackage mpkg) throws OptimizerException {
+ // recursively iterate over the packages in the
+ // POMultiQueryPackage adding a shifted keyInfoIndex entry
+ // in the packages in order going from initialIndex up to
+ // onePastEndIndex (exclusive), flattening out any nested
+ // packages in nested POMultiQueryPackages as we traverse
+ List<POPackage> pkgs = mpkg.getPackages();
+ // if we have fewer pkgs than (onePastEndIndex - initialIndex)
+ // it's because one or more of the pkgs is a POMultiQueryPackage which
+ // internally has packages.
+ int numIndices = (onePastEndIndex - initialIndex);
+ int end = numIndices;
+ if(numIndices > pkgs.size()) {
+ end = pkgs.size();
+ } else if (numIndices < pkgs.size()) {
+ int errCode = 2146;
+ String msg = "Internal Error. Inconsistency in key index found during optimization.";
+ throw new OptimizerException(msg, errCode, PigException.BUG);
+ }
+ int i = 0;
+ int curIndex = initialIndex;
+ while (i < end) {
+ POPackage pkg = pkgs.get(i);
+ if(pkg instanceof POMultiQueryPackage) {
+ curIndex = addShiftedKeyInfoIndex(curIndex, onePastEndIndex, (POMultiQueryPackage)pkg);
+ } else {
+ addShiftedKeyInfoIndex(curIndex, pkg);
+ curIndex++;
+ }
+ i++;
+ }
+ return curIndex; // could be used by a caller who recursively called this function
+
+ }
+
private void mergeOneCombinePlanWithIndex(PhysicalPlan from,
PhysicalPlan to, int initial, int current, byte mapKeyType) throws VisitorException {
POPackage cpk = (POPackage)from.getRoots().get(0);
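The index-shifting bookkeeping in addShiftedKeyInfoIndex above can be illustrated in isolation. Below is a minimal standalone sketch of the same idea - MQ_FLAG stands in for PigNullableWritable.mqFlag and the String values stand in for the real Pair values; it is not Pig's actual code:

import java.util.HashMap;
import java.util.Map;

public class KeyInfoShiftSketch {

    // Stand-in for PigNullableWritable.mqFlag (the multiquery bit mask).
    static final byte MQ_FLAG = (byte) 0x80;

    // Re-key the single keyInfo entry to the shifted, mq-flagged index,
    // preserving the invariant that keyInfo holds exactly one entry.
    static <V> void addShiftedIndex(Map<Integer, V> keyInfo, int index) {
        if (keyInfo.size() != 1) {
            throw new IllegalStateException("Inconsistency in key index");
        }
        int existing = keyInfo.keySet().iterator().next();
        // same arithmetic as the patch: OR in the flag, then narrow to byte
        int shifted = (byte) (index | MQ_FLAG);
        keyInfo.put(shifted, keyInfo.get(existing));
        if (shifted != existing) {
            keyInfo.remove(existing); // keep only the currently valid entry
        }
    }

    public static void main(String[] args) {
        Map<Integer, String> keyInfo = new HashMap<>();
        keyInfo.put(0, "value-for-the-single-input"); // original index is always 0
        addShiftedIndex(keyInfo, 2);
        // prints a single entry keyed by (byte)(2 | 0x80) == -126
        System.out.println(keyInfo);
    }
}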
@@ -664,7 +754,73 @@
// > index + 1
int incIndex = mergeOneMapPlanWithIndex(
mrOp.mapPlan, splitOp, index, sameKeyType);
-
+
+ // In the combine and reduce plans the Demux and POMultiQueryPackage
+ // operators' baseIndex is set whenever the incIndex above is > index + 1
+ // What does this 'baseIndex' mean - here is an attempt to explain it:
+ // Consider a map - reduce plan layout as shown below (the comments
+ // apply even if a combine plan was present) - An explanation of the "index"
+ // and "baseIndex" fields follows:
+ // The numbers in parentheses () are "index" values - Note that in multiquery
+ // optimizations these indices are actually ORed with a bitmask (0x80) on the
+ // map side in the LocalRearrange. The POMultiQueryPackage and PODemux operators
+ // then remove this bitmask to restore the original index values. In the commentary
+ // below, indices will be referred to without this bitmask - the bitmask is only
+ // used to identify the multiquery optimization during comparison - for details see the comments
+ // in POLocalRearrange.setIndex().
+ // The numbers in brackets [] are "baseIndex" values. These baseIndex values are
+ // used by POMultiQueryPackage and PODemux to calculate an arraylist index which
+ // they use to pick the right package or inner plan respectively. All this is needed
+ // since on the map side the indices are assigned after flattening all POLocalRearranges,
+ // including those nested in Splits, into one flat space (as can be noticed by the
+ // numbering of the indices under the split in the example below). The optimizer then
+ // duplicates the POMultiQueryPackage and Demux inner plan corresponding to the split
+ // in the reduce plan (the entities with * in the figure below). Now if a key with index '1'
+ // is emitted, it goes to the first POMultiQueryPackage in the reduce plan below, which
+ // then picks the second package in its arraylist of packages, which is a
+ // POMultiQueryPackage (the first with a * below). This POMultiQueryPackage then picks
+ // the first package in its list (it arrives at this arraylist index of 0 by subtracting
+ // the baseIndex (1) from the index coming in (1)). So the record emitted by LocalRearrange(1)
+ // reaches the right package. Likewise, if LocalRearrange(2) emits a key,value pair it would have
+ // an index 2. The first POMultiQueryPackage (with baseIndex 0 below) would pick the 3rd package
+ // (arraylist index 2, arrived at by doing index - baseIndex, which is 2 - 0). This is the
+ // duplicated POMultiQueryPackage with baseIndex 1. This would in turn pick the second package
+ // in its arraylist (arraylist index 1, arrived at by doing index - baseIndex, which is 2 - 1).
+ // The idea is that through duplication we make it easier to determine which POPackage to pick.
+ // The exact same logic is used by PODemux to pick the right inner plan from its arraylist of
+ // inner plans.
+
+ // The arrows in the figure below show the correspondence between the different operators
+ // and plans, i.e. the end of an arrow points to the operator or plan which consumes
+ // data coming from the root of the arrow.
+
+ // If you look at the layout below column-wise, each "column" is a MROper
+ // which is being merged into the parent MROper - the Split represents a
+ // MROper which inside of it has 2 MROpers merged in.
+ // A star (*) next to an operator means it is the same object as the
+ // other operator with a star(*) - Essentially the same object reference
+ // is copied twice.
+ //
+ // LocalRearrange(0)          Split                        LocalRearrange(3)
+ //     |                     /     \                              |
+ //     |      LocalRearrange(1)   LocalRearrange(2)               |
+ //     |             |        MAP PLAN    |                       |
+ // ----|-------------|---------------------|----------------------|-----------
+ //     |             |       REDUCE PLAN   |                      |
+ //     |             |  POMultiQueryPackage[0]                    |
+ //     V             V        | contains   V                      V
+ // [ POPackage, POMultiQueryPackage[1]*, POMultiQueryPackage[1]*, POPackage ]
+ //     |          /       \                /       \              |
+ //     |    POPackage  POPackage     POPackage  POPackage         |
+ //     |        \            |                                    |
+ //     |         \        Demux[0]                                |
+ //     V          V       | contains              V               V
+ // [ planOfRedOperators, planWithDemux*, planWithDemux*, planOfRedOperators ]
+ //              /      |                      |      \
+ //             /    Demux[1]              Demux[1]    \
+ //            V        |                      |        V
+ //      [planOfRedOps,planOfRedOps]  [planOfRedOps,planOfRedOps]
+ //
+
// merge the combiner plan
if (comPl != null) {
if (!mrOp.combinePlan.isEmpty()) {
@@ -676,7 +832,6 @@
throw new OptimizerException(msg, errCode,
PigException.BUG);
}
}
-
// merge the reducer plan
mergeOneReducePlanWithIndex(
mrOp.reducePlan, redPl, index, incIndex, mrOp.mapKeyType);
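The index/baseIndex arithmetic described in the long comment above can be modeled in a few lines. In the sketch below, Pkg, leaf and multi are invented stand-ins for POPackage, POMultiQueryPackage and PODemux rather than Pig's actual operators; what it demonstrates is the slot lookup (index - baseIndex) and the fact that the duplicated entries share one object:

import java.util.Arrays;
import java.util.List;

public class BaseIndexSketch {

    // Stand-in for a package/inner-plan: resolves an incoming key index.
    interface Pkg {
        String resolve(int index);
    }

    // A plain POPackage analogue: consumes whatever reaches it.
    static Pkg leaf(String name) {
        return index -> name;
    }

    // A POMultiQueryPackage/PODemux analogue: picks slot (index - baseIndex).
    static Pkg multi(int baseIndex, Pkg... inner) {
        List<Pkg> pkgs = Arrays.asList(inner);
        return index -> pkgs.get(index - baseIndex).resolve(index);
    }

    public static void main(String[] args) {
        // Layout from the figure: [POPackage, MQP[1]*, MQP[1]*, POPackage],
        // where the two starred slots hold the SAME duplicated object.
        Pkg dup = multi(1, leaf("split-pkg-for-LR(1)"), leaf("split-pkg-for-LR(2)"));
        Pkg top = multi(0, leaf("pkg-for-LR(0)"), dup, dup, leaf("pkg-for-LR(3)"));

        System.out.println(top.resolve(1)); // slot 1 -> dup, then 1 - 1 = 0
        System.out.println(top.resolve(2)); // slot 2 -> dup, then 2 - 1 = 1
        System.out.println(top.resolve(3)); // slot 3 -> pkg-for-LR(3)
    }
}

Running it prints split-pkg-for-LR(1), split-pkg-for-LR(2) and pkg-for-LR(3), matching the routing walked through in the comment.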
Modified: hadoop/pig/trunk/test/org/apache/pig/test/TestEvalPipeline2.java
URL: http://svn.apache.org/viewvc/hadoop/pig/trunk/test/org/apache/pig/test/TestEvalPipeline2.java?rev=784333&r1=784332&r2=784333&view=diff
==============================================================================
--- hadoop/pig/trunk/test/org/apache/pig/test/TestEvalPipeline2.java (original)
+++ hadoop/pig/trunk/test/org/apache/pig/test/TestEvalPipeline2.java Sat Jun 13 05:21:43 2009
@@ -299,7 +299,7 @@
"b = foreach a generate $0, CONCAT($0, '\u0005'), $1; "
+
"store b into 'testPigStorageWithCtrlCharsOutput.txt'
using PigStorage('\u0001');" +
"c = load 'testPigStorageWithCtrlCharsOutput.txt' using
PigStorage('\u0001') as (f1:chararray, f2:chararray, f3:chararray);";
- Util.registerQuery(pigServer, script);
+ Util.registerMultiLineQuery(pigServer, script);
Iterator<Tuple> it = pigServer.openIterator("c");
HashMap<String, Tuple> expectedResults = new HashMap<String, Tuple>();
expectedResults.put("hello", (Tuple)
Util.getPigConstant("('hello','hello\u0005','world')"));
Modified: hadoop/pig/trunk/test/org/apache/pig/test/TestKeyTypeDiscoveryVisitor.java
URL: http://svn.apache.org/viewvc/hadoop/pig/trunk/test/org/apache/pig/test/TestKeyTypeDiscoveryVisitor.java?rev=784333&r1=784332&r2=784333&view=diff
==============================================================================
--- hadoop/pig/trunk/test/org/apache/pig/test/TestKeyTypeDiscoveryVisitor.java (original)
+++ hadoop/pig/trunk/test/org/apache/pig/test/TestKeyTypeDiscoveryVisitor.java Sat Jun 13 05:21:43 2009
@@ -72,7 +72,7 @@
// The null records will get discarded by the join and hence the join
// output would be {(1, 15L, 1, 20, 70L)}
"join_a_b = join b_sum by x, a_aggs by x;";
- Util.registerQuery(pigServer, script);
+ Util.registerMultiLineQuery(pigServer, script);
Iterator<Tuple> it = pigServer.openIterator("join_a_b");
Object[] results = new Object[] { 1, 15L, 1, 20, 70L};
Tuple output = it.next();
Modified: hadoop/pig/trunk/test/org/apache/pig/test/TestMultiQuery.java
URL: http://svn.apache.org/viewvc/hadoop/pig/trunk/test/org/apache/pig/test/TestMultiQuery.java?rev=784333&r1=784332&r2=784333&view=diff
==============================================================================
--- hadoop/pig/trunk/test/org/apache/pig/test/TestMultiQuery.java (original)
+++ hadoop/pig/trunk/test/org/apache/pig/test/TestMultiQuery.java Sat Jun 13 05:21:43 2009
@@ -42,6 +42,7 @@
import org.apache.pig.backend.hadoop.executionengine.physicalLayer.PhysicalOperator;
import org.apache.pig.backend.hadoop.executionengine.physicalLayer.relationalOperators.POSplit;
import org.apache.pig.backend.hadoop.executionengine.physicalLayer.relationalOperators.POStore;
+import org.apache.pig.data.DataBag;
import org.apache.pig.data.Tuple;
import org.apache.pig.impl.PigContext;
import org.apache.pig.impl.io.FileLocalizer;
@@ -1093,9 +1094,86 @@
e.printStackTrace();
Assert.fail();
}
- }
+ }
@Test
+ public void testMultiQueryWithSplitInMapAndMultiMerge() throws Exception {
+
+ // clean up any existing dirs/files
+ String[] toClean = {"tmwsimam-input.txt", "foo1", "foo2", "foo3", "foo4" };
+ for (int j = 0; j < toClean.length; j++) {
+ Util.deleteFile(cluster, toClean[j]);
+ }
+
+ // the data below is tab delimited
+ String[] inputData = {
+ "1 a b e f i j m n",
+ "2 a b e f i j m n",
+ "3 c d g h k l o p",
+ "4 c d g h k l o p" };
+ Util.createInputFile(cluster, "tmwsimam-input.txt", inputData);
+ String query =
+ "A = LOAD 'tmwsimam-input.txt' " +
+ "as (f0:chararray, f1:chararray, f2:chararray, f3:chararray, " +
+ "f4:chararray, f5:chararray, f6:chararray, f7:chararray,
f8:chararray); " +
+ "B = FOREACH A GENERATE f0, f1, f2, f3, f4;" +
+ "B1 = foreach B generate f0, f1, f2;" +
+ "C = GROUP B1 BY (f1, f2);" +
+ "STORE C into 'foo1' using BinStorage();" +
+ "B2 = FOREACH B GENERATE f0, f3, f4;" +
+ "E = GROUP B2 BY (f3, f4);" +
+ "STORE E into 'foo2' using BinStorage();" +
+ "F = FOREACH A GENERATE f0, f5, f6, f7, f8;" +
+ "F1 = FOREACH F GENERATE f0, f5, f6;" +
+ "G = GROUP F1 BY (f5, f6);" +
+ "STORE G into 'foo3' using BinStorage();" +
+ "F2 = FOREACH F GENERATE f0, f7, f8;" +
+ "I = GROUP F2 BY (f7, f8);" +
+ "STORE I into 'foo4' using BinStorage();" +
+ "explain;";
+ myPig.setBatchOn();
+ Util.registerMultiLineQuery(myPig, query);
+ myPig.executeBatch();
+
+ String templateLoad = "a = load 'foo' using BinStorage();";
+
+ Map<Tuple, DataBag> expectedResults = new HashMap<Tuple, DataBag>();
+ expectedResults.put((Tuple)Util.getPigConstant("('a','b')"),
+
(DataBag)Util.getPigConstant("{('1','a','b'),('2','a','b')}"));
+ expectedResults.put((Tuple)Util.getPigConstant("('c','d')"),
+
(DataBag)Util.getPigConstant("{('3','c','d'),('4','c','d')}"));
+ expectedResults.put((Tuple)Util.getPigConstant("('e','f')"),
+
(DataBag)Util.getPigConstant("{('1','e','f'),('2','e','f')}"));
+ expectedResults.put((Tuple)Util.getPigConstant("('g','h')"),
+
(DataBag)Util.getPigConstant("{('3','g','h'),('4','g','h')}"));
+ expectedResults.put((Tuple)Util.getPigConstant("('i','j')"),
+
(DataBag)Util.getPigConstant("{('1','i','j'),('2','i','j')}"));
+ expectedResults.put((Tuple)Util.getPigConstant("('k','l')"),
+
(DataBag)Util.getPigConstant("{('3','k','l'),('4','k','l')}"));
+ expectedResults.put((Tuple)Util.getPigConstant("('m','n')"),
+
(DataBag)Util.getPigConstant("{('1','m','n'),('2','m','n')}"));
+ expectedResults.put((Tuple)Util.getPigConstant("('o','p')"),
+
(DataBag)Util.getPigConstant("{('3','o','p'),('4','o','p')}"));
+ String[] outputDirs = { "foo1", "foo2", "foo3", "foo4" };
+ for(int k = 0; k < outputDirs.length; k++) {
+ myPig.registerQuery(templateLoad.replace("foo", outputDirs[k]));
+ Iterator<Tuple> it = myPig.openIterator("a");
+ int numTuples = 0;
+ while(it.hasNext()) {
+ Tuple t = it.next();
+ assertEquals(expectedResults.get(t.get(0)), t.get(1));
+ numTuples++;
+ }
+ assertEquals(numTuples, 2);
+ }
+ // cleanup
+ for (int j = 0; j < toClean.length; j++) {
+ Util.deleteFile(cluster, toClean[j]);
+ }
+
+ }
+
+ @Test
public void testMultiQueryWithTwoStores() {
System.out.println("===== multi-query with 2 stores =====");
Modified: hadoop/pig/trunk/test/org/apache/pig/test/TestRelationToExprProject.java
URL: http://svn.apache.org/viewvc/hadoop/pig/trunk/test/org/apache/pig/test/TestRelationToExprProject.java?rev=784333&r1=784332&r2=784333&view=diff
==============================================================================
--- hadoop/pig/trunk/test/org/apache/pig/test/TestRelationToExprProject.java (original)
+++ hadoop/pig/trunk/test/org/apache/pig/test/TestRelationToExprProject.java Sat Jun 13 05:21:43 2009
@@ -92,7 +92,7 @@
" filter_one = filter test by (col2==1);" +
" filter_notone = filter test by (col2!=1);" +
" generate group as col1, COUNT(filter_one) as
cnt_one, COUNT(filter_notone) as cnt_notone;};";
- Util.registerQuery(pigServer, script);
+ Util.registerMultiLineQuery(pigServer, script);
Iterator<Tuple> it = pigServer.openIterator("test3");
Tuple[] expected = new DefaultTuple[2];
expected[0] = (Tuple) Util.getPigConstant("(1,1L,1L)");
@@ -145,7 +145,7 @@
" matchedcount as matchedcount," +
" A.str;" +
" };";
- Util.registerQuery(pigServer, query);
+ Util.registerMultiLineQuery(pigServer, query);
Iterator<Tuple> it = pigServer.openIterator("Cfiltered");
Map<String, Tuple> expected = new HashMap<String, Tuple>();
expected.put("a", (Tuple)
Util.getPigConstant("('a',1L,{('hello'),('goodbye')})"));
@@ -179,7 +179,7 @@
" }" +
"TESTDATA_AGG_1 = group TESTDATA_AGG ALL;" +
"TESTDATA_AGG_2 = foreach TESTDATA_AGG_1 generate
COUNT(TESTDATA_AGG);" ;
- Util.registerQuery(pigServer, query);
+ Util.registerMultiLineQuery(pigServer, query);
Iterator<Tuple> it = pigServer.openIterator("TESTDATA_AGG_2");
int i = 0;
@@ -205,7 +205,7 @@
"test3 = foreach test2 {" +
" filter_one = filter test by (col2==1);" +
" generate filter_one;};";
- Util.registerQuery(pigServer, script);
+ Util.registerMultiLineQuery(pigServer, script);
Iterator<Tuple> it = pigServer.openIterator("test3");
Map<Tuple, Integer> expected = new HashMap<Tuple, Integer>();
expected.put((Tuple) Util.getPigConstant("({(1,1,3)})"), 0);
@@ -246,7 +246,7 @@
"test3 = foreach test2 {" +
" filter_one = filter test by (col2==1);" +
" generate group, filter_one;};";
- Util.registerQuery(pigServer, script);
+ Util.registerMultiLineQuery(pigServer, script);
Iterator<Tuple> it = pigServer.openIterator("test3");
Map<Tuple, Integer> expected = new HashMap<Tuple, Integer>();
expected.put((Tuple) Util.getPigConstant("(1,{(1,1,3)})"), 0);
Modified: hadoop/pig/trunk/test/org/apache/pig/test/TestUnion.java
URL: http://svn.apache.org/viewvc/hadoop/pig/trunk/test/org/apache/pig/test/TestUnion.java?rev=784333&r1=784332&r2=784333&view=diff
==============================================================================
--- hadoop/pig/trunk/test/org/apache/pig/test/TestUnion.java (original)
+++ hadoop/pig/trunk/test/org/apache/pig/test/TestUnion.java Sat Jun 13 05:21:43 2009
@@ -224,7 +224,7 @@
Util.createInputFile(cluster, "input1.txt", new String[] {"dummy"});
Util.createInputFile(cluster, "input2.txt", new String[] {"dummy"});
PigServer pig = new PigServer(ExecType.MAPREDUCE,
cluster.getProperties());
- Util.registerQuery(pig, "a = load 'input1.txt';" +
+ Util.registerMultiLineQuery(pig, "a = load 'input1.txt';" +
"b = load 'input2.txt';" +
"c = foreach a generate 1, {(1, 'str1')};" +
"d = foreach b generate 2, {(2, 'str2')};" +
Modified: hadoop/pig/trunk/test/org/apache/pig/test/Util.java
URL: http://svn.apache.org/viewvc/hadoop/pig/trunk/test/org/apache/pig/test/Util.java?rev=784333&r1=784332&r2=784333&view=diff
==============================================================================
--- hadoop/pig/trunk/test/org/apache/pig/test/Util.java (original)
+++ hadoop/pig/trunk/test/org/apache/pig/test/Util.java Sat Jun 13 05:21:43 2009
@@ -392,7 +392,7 @@
return comp.getMRPlan();
}
- public static void registerQuery(PigServer pigServer, String query) throws IOException {
+ public static void registerMultiLineQuery(PigServer pigServer, String query) throws IOException {
File f = File.createTempFile("tmp", "");
PrintWriter pw = new PrintWriter(f);
pw.println(query);
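A typical call site of the renamed helper follows the pattern below; a minimal usage sketch, assuming local mode and a made-up input file, schema and aliases:

import java.util.Iterator;

import org.apache.pig.ExecType;
import org.apache.pig.PigServer;
import org.apache.pig.data.Tuple;
import org.apache.pig.test.Util;

public class RegisterMultiLineQueryExample {
    public static void main(String[] args) throws Exception {
        // Hypothetical call site: 'input.txt' and the aliases are made up.
        PigServer pigServer = new PigServer(ExecType.LOCAL);
        String script = "a = load 'input.txt' as (x:int, y:chararray);" +
                        "b = filter a by x > 0;" +
                        "c = group b by y;";
        // The helper writes the whole script to a temp file and registers it,
        // so multi-statement queries parse the way a script file would.
        Util.registerMultiLineQuery(pigServer, script);
        Iterator<Tuple> it = pigServer.openIterator("c");
        while (it.hasNext()) {
            System.out.println(it.next());
        }
    }
}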