Author: daijy
Date: Wed Sep 22 00:48:36 2010
New Revision: 999699

URL: http://svn.apache.org/viewvc?rev=999699&view=rev
Log:
PIG-1598: Pig gobbles up error messages - Part 2

Modified:
    hadoop/pig/branches/branch-0.8/CHANGES.txt
    hadoop/pig/branches/branch-0.8/src/org/apache/pig/PigServer.java
    hadoop/pig/branches/branch-0.8/src/org/apache/pig/backend/hadoop/executionengine/HExecutionEngine.java
    hadoop/pig/branches/branch-0.8/src/org/apache/pig/backend/hadoop/executionengine/mapReduceLayer/MRCompiler.java

Modified: hadoop/pig/branches/branch-0.8/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/pig/branches/branch-0.8/CHANGES.txt?rev=999699&r1=999698&r2=999699&view=diff
==============================================================================
--- hadoop/pig/branches/branch-0.8/CHANGES.txt (original)
+++ hadoop/pig/branches/branch-0.8/CHANGES.txt Wed Sep 22 00:48:36 2010
@@ -192,6 +192,9 @@ PIG-1353: Map-side joins (ashutoshc)
 PIG-1309: Map-side Cogroup (ashutoshc)
 
 BUG FIXES
+
+PIG-1598: Pig gobbles up error messages - Part 2 (nrai via daijy)
+
 PIG-1616: 'union onschema' does not create output with correct schema
 when udfs are involved (thejas)
 

Modified: hadoop/pig/branches/branch-0.8/src/org/apache/pig/PigServer.java
URL: http://svn.apache.org/viewvc/hadoop/pig/branches/branch-0.8/src/org/apache/pig/PigServer.java?rev=999699&r1=999698&r2=999699&view=diff
==============================================================================
--- hadoop/pig/branches/branch-0.8/src/org/apache/pig/PigServer.java (original)
+++ hadoop/pig/branches/branch-0.8/src/org/apache/pig/PigServer.java Wed Sep 22 00:48:36 2010
@@ -1189,7 +1189,7 @@ public class PigServer {
         return executeCompiledLogicalPlan(typeCheckedLp);
     }
     
-    private PigStats executeCompiledLogicalPlan(LogicalPlan compiledLp) throws ExecException {
+    private PigStats executeCompiledLogicalPlan(LogicalPlan compiledLp) throws ExecException, FrontendException {
         // discover pig features used in this script
         ScriptState.get().setScriptFeatures(compiledLp);
         PhysicalPlan pp = compilePp(compiledLp);

Modified: hadoop/pig/branches/branch-0.8/src/org/apache/pig/backend/hadoop/executionengine/HExecutionEngine.java
URL: http://svn.apache.org/viewvc/hadoop/pig/branches/branch-0.8/src/org/apache/pig/backend/hadoop/executionengine/HExecutionEngine.java?rev=999699&r1=999698&r2=999699&view=diff
==============================================================================
--- hadoop/pig/branches/branch-0.8/src/org/apache/pig/backend/hadoop/executionengine/HExecutionEngine.java (original)
+++ hadoop/pig/branches/branch-0.8/src/org/apache/pig/backend/hadoop/executionengine/HExecutionEngine.java Wed Sep 22 00:48:36 2010
@@ -287,7 +287,7 @@ public class HExecutionEngine {
     }
 
     public List<ExecJob> execute(PhysicalPlan plan,
-                                 String jobName) throws ExecException {
+                                 String jobName) throws ExecException, FrontendException {
         MapReduceLauncher launcher = new MapReduceLauncher();
         List<ExecJob> jobs = new ArrayList<ExecJob>();
 
@@ -319,8 +319,11 @@ public class HExecutionEngine {
         } catch (Exception e) {
             // There are a lot of exceptions thrown by the launcher.  If this
             // is an ExecException, just let it through.  Else wrap it.
-            if (e instanceof ExecException) throw (ExecException)e;
-            else {
+            if (e instanceof ExecException){
+               throw (ExecException)e;
+            } else if (e instanceof FrontendException) {
+               throw (FrontendException)e;
+            } else {
                 int errCode = 2043;
                 String msg = "Unexpected error during execution.";
                 throw new ExecException(msg, errCode, PigException.BUG, e);
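
For context, the hunk above is the core of the fix: the known checked exception types are rethrown as-is so their messages reach the user, and only genuinely unexpected errors get wrapped as a generic bug. Below is a minimal standalone sketch of that pattern; the class names (ExecError, FrontendError, RethrowSketch) are hypothetical stand-ins, not Pig's real ExecException/FrontendException.

import java.util.concurrent.Callable;

// Hypothetical stand-ins for Pig's two user-facing checked exception types.
class ExecError extends Exception {
    ExecError(String msg) { super(msg); }
    ExecError(String msg, Throwable cause) { super(msg, cause); }
}
class FrontendError extends Exception {
    FrontendError(String msg) { super(msg); }
}

public class RethrowSketch {
    // Let the two known checked types pass through unchanged; wrap the rest.
    static void execute(Callable<Void> launcher) throws ExecError, FrontendError {
        try {
            launcher.call();
        } catch (Exception e) {
            if (e instanceof ExecError) {
                throw (ExecError) e;       // already user-facing: keep its message
            } else if (e instanceof FrontendError) {
                throw (FrontendError) e;   // the case this patch stops swallowing
            } else {
                throw new ExecError("Unexpected error during execution.", e);
            }
        }
    }

    public static void main(String[] args) {
        try {
            execute(() -> { throw new FrontendError("schema mismatch in foreach"); });
        } catch (Exception e) {
            System.out.println(e.getMessage()); // prints the original message intact
        }
    }
}

Note that widening the throws clause here is what forces the matching change to executeCompiledLogicalPlan in PigServer.java above.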

Modified: hadoop/pig/branches/branch-0.8/src/org/apache/pig/backend/hadoop/executionengine/mapReduceLayer/MRCompiler.java
URL: http://svn.apache.org/viewvc/hadoop/pig/branches/branch-0.8/src/org/apache/pig/backend/hadoop/executionengine/mapReduceLayer/MRCompiler.java?rev=999699&r1=999698&r2=999699&view=diff
==============================================================================
--- hadoop/pig/branches/branch-0.8/src/org/apache/pig/backend/hadoop/executionengine/mapReduceLayer/MRCompiler.java (original)
+++ hadoop/pig/branches/branch-0.8/src/org/apache/pig/backend/hadoop/executionengine/mapReduceLayer/MRCompiler.java Wed Sep 22 00:48:36 2010
@@ -1052,7 +1052,8 @@ public class MRCompiler extends PhyPlanV
             LoadFunc loadFunc = ((POLoad)phyOp).getLoadFunc();
             try {
                 if(!(CollectableLoadFunc.class.isAssignableFrom(loadFunc.getClass()))){
-                    throw new MRCompilerException("While using 'collected' on group; data must be loaded via loader implementing CollectableLoadFunc.");
+                    int errCode = 2249;
+                    throw new MRCompilerException("While using 'collected' on group; data must be loaded via loader implementing CollectableLoadFunc.", errCode);
                 }
                 ((CollectableLoadFunc)loadFunc).ensureAllKeyInstancesInSameSplit();
             } catch (MRCompilerException e){
@@ -1077,8 +1078,9 @@ public class MRCompiler extends PhyPlanV
             }    
         }
         else if(!curMROp.reduceDone){
+               int errCode=2250;
             String msg = "Blocking operators are not allowed before Collected Group. Consider dropping using 'collected'.";
-            throw new MRCompilerException(msg, PigException.BUG);   
+            throw new MRCompilerException(msg, errCode, PigException.BUG);   
         }
         else{
             int errCode = 2022;
@@ -1336,9 +1338,10 @@ public class MRCompiler extends PhyPlanV
     public void visitMergeCoGroup(POMergeCogroup poCoGrp) throws VisitorException {
 
         if(compiledInputs.length < 2){
+            int errCode=2251;
             String errMsg = "Merge Cogroup work on two or more relations." +
                        "To use map-side group-by on single relation, use 
'collected' qualifier.";
-            throw new MRCompilerException(errMsg);
+            throw new MRCompilerException(errMsg, errCode);
         }
             
         List<FuncSpec> funcSpecs = new ArrayList<FuncSpec>(compiledInputs.length-1);
@@ -1372,14 +1375,18 @@ public class MRCompiler extends PhyPlanV
                 LoadFunc loadfunc = sideLoader.getLoadFunc();
                 if(i == 0){
                     
-                    if(!(CollectableLoadFunc.class.isAssignableFrom(loadfunc.getClass())))
-                        throw new MRCompilerException("Base loader in Cogroup must implement CollectableLoadFunc.");
+                    if(!(CollectableLoadFunc.class.isAssignableFrom(loadfunc.getClass()))){
+                       int errCode = 2252;
+                        throw new MRCompilerException("Base loader in Cogroup must implement CollectableLoadFunc.", errCode);
+                    }
                     
                     ((CollectableLoadFunc)loadfunc).ensureAllKeyInstancesInSameSplit();
                     continue;
                 }
-                if(!(IndexableLoadFunc.class.isAssignableFrom(loadfunc.getClass())))
-                    throw new MRCompilerException("Side loaders in cogroup must implement IndexableLoadFunc.");
+                if(!(IndexableLoadFunc.class.isAssignableFrom(loadfunc.getClass()))){
+                    int errCode = 2253;
+                    throw new MRCompilerException("Side loaders in cogroup must implement IndexableLoadFunc.", errCode);
+                }
                 
                 funcSpecs.add(funcSpec);
                 fileSpecs.add(loadFileSpec.getFileName());
@@ -1393,8 +1400,10 @@ public class MRCompiler extends PhyPlanV
             
             // Use map-reduce operator of base relation for the cogroup operation.
             MapReduceOper baseMROp = phyToMROpMap.get(poCoGrp.getInputs().get(0));
-            if(baseMROp.mapDone || !baseMROp.reducePlan.isEmpty())
-                throw new MRCompilerException("Currently merged cogroup is not supported after blocking operators.");
+            if(baseMROp.mapDone || !baseMROp.reducePlan.isEmpty()){
+                int errCode = 2254;
+                throw new MRCompilerException("Currently merged cogroup is not supported after blocking operators.", errCode);
+            }
             
             // Create new map-reduce operator for indexing job and then configure it.
             MapReduceOper indexerMROp = getMROp();
@@ -1518,8 +1527,10 @@ public class MRCompiler extends PhyPlanV
     public void visitMergeJoin(POMergeJoin joinOp) throws VisitorException {
 
         try{
-            if(compiledInputs.length != 2 || joinOp.getInputs().size() != 2)
-                throw new MRCompilerException("Merge Join must have exactly two inputs. Found : "+compiledInputs.length, 1101);
+            if(compiledInputs.length != 2 || joinOp.getInputs().size() != 2){
+                int errCode=1101;
+                throw new MRCompilerException("Merge Join must have exactly two inputs. Found : "+compiledInputs.length, errCode);
+            }
 
             OperatorKey leftPhyOpKey = joinOp.getInputs().get(0).getOperatorKey();
             OperatorKey rightPhyOpKey = joinOp.getInputs().get(1).getOperatorKey();
@@ -1819,7 +1830,8 @@ public class MRCompiler extends PhyPlanV
     public void visitSkewedJoin(POSkewedJoin op) throws VisitorException {
                try {
                        if (compiledInputs.length != 2) {
-                               throw new VisitorException("POSkewedJoin operator has " + compiledInputs.length + " inputs. It should have 2.");
+                               int errCode = 2255;
+                               throw new VisitorException("POSkewedJoin operator has " + compiledInputs.length + " inputs. It should have 2.", errCode);
                        }
                        
                        //change plan to store the first join input into a temp file
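
The MRCompiler hunks above all make the same change: messages previously thrown with no error code now carry one (2249 through 2255, plus the pre-existing 1101 for merge join), so they survive Pig's error-handling path instead of being collapsed into a generic failure. A small self-contained sketch of the convention follows; CompilerError and ErrCodeSketch are hypothetical simplified names, not Pig's actual MRCompilerException hierarchy.

// Hypothetical simplified exception that carries an error code with its message.
class CompilerError extends Exception {
    private final int errCode;
    CompilerError(String msg, int errCode) {
        super("ERROR " + errCode + ": " + msg);
        this.errCode = errCode;
    }
    int getErrCode() { return errCode; }
}

public class ErrCodeSketch {
    // Mirrors the merge-join check in the patch: reject anything but two inputs.
    static void checkMergeJoinInputs(int inputCount) throws CompilerError {
        if (inputCount != 2) {
            int errCode = 1101; // code the patch assigns to this condition
            throw new CompilerError(
                "Merge Join must have exactly two inputs. Found : " + inputCount, errCode);
        }
    }

    public static void main(String[] args) {
        try {
            checkMergeJoinInputs(3);
        } catch (CompilerError e) {
            // ERROR 1101: Merge Join must have exactly two inputs. Found : 3
            System.out.println(e.getMessage());
        }
    }
}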

