Author: pradeepkth
Date: Tue Jan 26 00:02:23 2010
New Revision: 903018

URL: http://svn.apache.org/viewvc?rev=903018&view=rev
Log:
svn merge -r896951:902253 http://svn.apache.org/repos/asf/hadoop/pig/trunk

Added:
    hadoop/pig/branches/load-store-redesign/src/docs/src/documentation/content/xdocs/piglatin_ref1.xml
    hadoop/pig/branches/load-store-redesign/src/docs/src/documentation/content/xdocs/piglatin_ref2.xml
    hadoop/pig/branches/load-store-redesign/src/org/apache/pig/experimental/logical/
    hadoop/pig/branches/load-store-redesign/src/org/apache/pig/experimental/logical/expression/
    hadoop/pig/branches/load-store-redesign/src/org/apache/pig/experimental/logical/expression/AndExpression.java
    hadoop/pig/branches/load-store-redesign/src/org/apache/pig/experimental/logical/expression/BinaryExpression.java
    hadoop/pig/branches/load-store-redesign/src/org/apache/pig/experimental/logical/expression/ColumnExpression.java
    hadoop/pig/branches/load-store-redesign/src/org/apache/pig/experimental/logical/expression/ConstantExpression.java
    hadoop/pig/branches/load-store-redesign/src/org/apache/pig/experimental/logical/expression/EqualExpression.java
    hadoop/pig/branches/load-store-redesign/src/org/apache/pig/experimental/logical/expression/LogicalExpression.java
    hadoop/pig/branches/load-store-redesign/src/org/apache/pig/experimental/logical/expression/LogicalExpressionPlan.java
    hadoop/pig/branches/load-store-redesign/src/org/apache/pig/experimental/logical/expression/LogicalExpressionVisitor.java
    hadoop/pig/branches/load-store-redesign/src/org/apache/pig/experimental/logical/expression/ProjectExpression.java
    hadoop/pig/branches/load-store-redesign/src/org/apache/pig/experimental/logical/relational/
    hadoop/pig/branches/load-store-redesign/src/org/apache/pig/experimental/logical/relational/LOLoad.java
    hadoop/pig/branches/load-store-redesign/src/org/apache/pig/experimental/logical/relational/LogicalPlan.java
    hadoop/pig/branches/load-store-redesign/src/org/apache/pig/experimental/logical/relational/LogicalPlanVisitor.java
    hadoop/pig/branches/load-store-redesign/src/org/apache/pig/experimental/logical/relational/LogicalRelationalOperator.java
    hadoop/pig/branches/load-store-redesign/src/org/apache/pig/experimental/logical/relational/LogicalSchema.java
    hadoop/pig/branches/load-store-redesign/src/org/apache/pig/experimental/plan/
    hadoop/pig/branches/load-store-redesign/src/org/apache/pig/experimental/plan/BaseOperatorPlan.java
    hadoop/pig/branches/load-store-redesign/src/org/apache/pig/experimental/plan/DependencyOrderWalker.java
    hadoop/pig/branches/load-store-redesign/src/org/apache/pig/experimental/plan/DepthFirstWalker.java
    hadoop/pig/branches/load-store-redesign/src/org/apache/pig/experimental/plan/Operator.java
    hadoop/pig/branches/load-store-redesign/src/org/apache/pig/experimental/plan/OperatorPlan.java
    hadoop/pig/branches/load-store-redesign/src/org/apache/pig/experimental/plan/PlanEdge.java
    hadoop/pig/branches/load-store-redesign/src/org/apache/pig/experimental/plan/PlanVisitor.java
    hadoop/pig/branches/load-store-redesign/src/org/apache/pig/experimental/plan/PlanWalker.java
    hadoop/pig/branches/load-store-redesign/src/org/apache/pig/experimental/plan/ReverseDependencyOrderWalker.java
    hadoop/pig/branches/load-store-redesign/src/org/apache/pig/experimental/plan/optimizer/
    hadoop/pig/branches/load-store-redesign/src/org/apache/pig/experimental/plan/optimizer/PlanOptimizer.java
    hadoop/pig/branches/load-store-redesign/src/org/apache/pig/experimental/plan/optimizer/PlanTransformListener.java
    hadoop/pig/branches/load-store-redesign/src/org/apache/pig/experimental/plan/optimizer/Rule.java
    hadoop/pig/branches/load-store-redesign/src/org/apache/pig/experimental/plan/optimizer/Transformer.java
    hadoop/pig/branches/load-store-redesign/test/org/apache/pig/test/TestExperimentalOperatorPlan.java
    hadoop/pig/branches/load-store-redesign/test/org/apache/pig/test/TestExperimentalRule.java
Removed:
    hadoop/pig/branches/load-store-redesign/src/docs/src/documentation/content/xdocs/piglatin_reference.xml
    hadoop/pig/branches/load-store-redesign/src/docs/src/documentation/content/xdocs/piglatin_users.xml
Modified:
    hadoop/pig/branches/load-store-redesign/CHANGES.txt
    hadoop/pig/branches/load-store-redesign/src/docs/src/documentation/content/xdocs/cookbook.xml
    hadoop/pig/branches/load-store-redesign/src/docs/src/documentation/content/xdocs/index.xml
    hadoop/pig/branches/load-store-redesign/src/docs/src/documentation/content/xdocs/setup.xml
    hadoop/pig/branches/load-store-redesign/src/docs/src/documentation/content/xdocs/site.xml
    hadoop/pig/branches/load-store-redesign/src/docs/src/documentation/content/xdocs/zebra_pig.xml
    hadoop/pig/branches/load-store-redesign/src/docs/src/documentation/content/xdocs/zebra_users.xml
    hadoop/pig/branches/load-store-redesign/src/org/apache/pig/backend/hadoop/executionengine/HExecutionEngine.java
    hadoop/pig/branches/load-store-redesign/src/org/apache/pig/backend/hadoop/executionengine/mapReduceLayer/SecondaryKeyOptimizer.java
    hadoop/pig/branches/load-store-redesign/src/org/apache/pig/backend/hadoop/executionengine/physicalLayer/relationalOperators/POSort.java
    hadoop/pig/branches/load-store-redesign/src/org/apache/pig/data/DefaultDataBag.java
    hadoop/pig/branches/load-store-redesign/src/org/apache/pig/data/DistinctDataBag.java
    hadoop/pig/branches/load-store-redesign/src/org/apache/pig/data/InternalDistinctBag.java
    hadoop/pig/branches/load-store-redesign/src/org/apache/pig/data/InternalSortedBag.java
    hadoop/pig/branches/load-store-redesign/src/org/apache/pig/data/SortedDataBag.java
    hadoop/pig/branches/load-store-redesign/src/org/apache/pig/impl/logicalLayer/LOCast.java
    hadoop/pig/branches/load-store-redesign/src/org/apache/pig/impl/logicalLayer/optimizer/SchemaRemover.java
    hadoop/pig/branches/load-store-redesign/test/org/apache/pig/test/TestEvalPipeline2.java
    hadoop/pig/branches/load-store-redesign/test/org/apache/pig/test/TestSecondarySort.java
    hadoop/pig/branches/load-store-redesign/test/org/apache/pig/test/TestTypeCheckingValidator.java

Modified: hadoop/pig/branches/load-store-redesign/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/pig/branches/load-store-redesign/CHANGES.txt?rev=903018&r1=903017&r2=903018&view=diff
==============================================================================
--- hadoop/pig/branches/load-store-redesign/CHANGES.txt (original)
+++ hadoop/pig/branches/load-store-redesign/CHANGES.txt Tue Jan 26 00:02:23 2010
@@ -51,6 +51,8 @@
 
 IMPROVEMENTS
 
+PIG-1192: Pig 0.6 Docs fixes (chandec via olgan)
+
 PIG-1177: Pig 0.6 Docs - Zebra docs (chandec via olgan)
 
 PIG-1175: Pig 0.6 Docs - Store v. Dump (chandec via olgan)
@@ -103,6 +105,9 @@
 
 BUG FIXES
 
+PIG-1191:  POCast throws exception for certain sequences of LOAD, FILTER,
+                       FOREACH (pradeepkth via gates)
+
 PIG-1171: Top-N queries produce incorrect results when followed by a cross statement (rding via olgan)
 
 PIG-1159: merge join right side table does not support comma separated paths
@@ -369,6 +374,15 @@
 PIG-1180: Piggybank should compile even if we only have
 "pig-withouthadoop.jar" but no "pig.jar" in the pig home directory (daijy)
 
+PIG-1185: Data bags do not close spill files after using iterator to read
+tuples (yinghe via daijy)
+
+PIG-1186: Pig do not take values in "pig-cluster-hadoop-site.xml" (daijy)
+
+PIG-1193: Secondary sort issue on nested desc sort (daijy)
+
+PIG-1195: POSort should take care of sort order (daijy)
+
 Release 0.5.0
 
 INCOMPATIBLE CHANGES

Modified: hadoop/pig/branches/load-store-redesign/src/docs/src/documentation/content/xdocs/cookbook.xml
URL: http://svn.apache.org/viewvc/hadoop/pig/branches/load-store-redesign/src/docs/src/documentation/content/xdocs/cookbook.xml?rev=903018&r1=903017&r2=903018&view=diff
==============================================================================
--- hadoop/pig/branches/load-store-redesign/src/docs/src/documentation/content/xdocs/cookbook.xml (original)
+++ hadoop/pig/branches/load-store-redesign/src/docs/src/documentation/content/xdocs/cookbook.xml Tue Jan 26 00:02:23 2010
@@ -36,7 +36,7 @@
 
 <section>
 <title>Use Optimization</title>
-<p>Pig supports various <a href="piglatin_users.html#Optimization+Rules">optimization rules</a> which are turned on by default. 
+<p>Pig supports various <a href="piglatin_ref1.html#Optimization+Rules">optimization rules</a> which are turned on by default. 
 Become familiar with these rules.</p>
 </section>
 
@@ -220,29 +220,34 @@
 <section>
 <title>Specialized Join Optimizations</title>
 <p>Optimization can also be achieved using fragment replicate joins, skewed joins, and merge joins. 
-For more information see <a href="piglatin_users.html#Specialized+Joins">Specialized Joins</a>.</p>
+For more information see <a href="piglatin_ref1.html#Specialized+Joins">Specialized Joins</a>.</p>
 </section>
 
 </section>
 
 
 <section>
-<title>Use the PARALLEL Keyword</title>
+<title>Use the PARALLEL Clause</title>
 
-<p>PARALLEL controls the number of reducers invoked by Hadoop. The default value is 1. However, the number of reducers you need for a particular construct in Pig that forms a MapReduce boundary depends entirely on (1) your data and the number of intermediate keys you are generating in your mappers and (2) the partitioner and distribution of map (combiner) output keys. In the best cases we have seen that a reducer processing about 500 MB of data behaves efficiently.</p>
+<p>Use the PARALLEL clause to increase the parallelism of a job:</p>
+<ul>
+<li>PARALLEL sets the number of reduce tasks for the MapReduce jobs generated by Pig. The default value is 1 (one reduce task).</li>
+<li>PARALLEL only affects the number of reduce tasks. Map parallelism is determined by the input file, one map for each HDFS block. </li>
+<li>If you don’t specify PARALLEL, you still get the same map parallelism but only one reduce task.</li>
+</ul>
+<p></p>
+<p>As noted, the default value for PARALLEL is 1 (one reduce task). However, the number of reducers you need for a particular construct in Pig that forms a MapReduce boundary depends entirely on (1) your data and the number of intermediate keys you are generating in your mappers and (2) the partitioner and distribution of map (combiner) output keys. In the best cases we have seen that a reducer processing about 500 MB of data behaves efficiently.</p>
 
-<p>The keyword makes sense with any operator that starts a reduce phase. This includes  
-<a href="piglatin_reference.html#COGROUP">COGROUP</a>, 
-<a href="piglatin_reference.html#CROSS">CROSS</a>, 
-<a href="piglatin_reference.html#DISTINCT">DISTINCT</a>, 
-<a href="piglatin_reference.html#GROUP">GROUP</a>, 
-<a href="piglatin_reference.html#JOIN">JOIN</a>, 
-<a href="piglatin_reference.html#ORDER">ORDER</a>, and 
-<a href="piglatin_reference.html#JOIN%2C+OUTER">OUTER JOIN</a>.
-
-</p>
-
-<p>You can set the value of PARALLEL in your scripts in conjunction with the operator (see the example below). You can also set the value of PARALLEL for all scripts using the <a href="piglatin_reference.html#set">set</a> command.</p>
+<p>You can include the PARALLEL clause with any operator that starts a reduce phase (see the example below). This includes  
+<a href="piglatin_ref2.html#COGROUP">COGROUP</a>, 
+<a href="piglatin_ref2.html#CROSS">CROSS</a>, 
+<a href="piglatin_ref2.html#DISTINCT">DISTINCT</a>, 
+<a href="piglatin_ref2.html#GROUP">GROUP</a>, 
+<a href="piglatin_ref2.html#JOIN+%28inner%29">JOIN (inner)</a>, 
+<a href="piglatin_ref2.html#JOIN+%28outer%29">JOIN (outer)</a>, and
+<a href="piglatin_ref2.html#ORDER">ORDER</a>.
+ 
+You can also set the value of PARALLEL for all scripts using the <a href="piglatin_ref2.html#set">set</a> command.</p>
 
 <p>Example</p>
 
@@ -251,7 +256,6 @@
 B = group A by t PARALLEL 18;
 .....
 </source>
-
 </section>
 
 <section>

Modified: hadoop/pig/branches/load-store-redesign/src/docs/src/documentation/content/xdocs/index.xml
URL: http://svn.apache.org/viewvc/hadoop/pig/branches/load-store-redesign/src/docs/src/documentation/content/xdocs/index.xml?rev=903018&r1=903017&r2=903018&view=diff
==============================================================================
--- hadoop/pig/branches/load-store-redesign/src/docs/src/documentation/content/xdocs/index.xml (original)
+++ hadoop/pig/branches/load-store-redesign/src/docs/src/documentation/content/xdocs/index.xml Tue Jan 26 00:02:23 2010
@@ -31,8 +31,8 @@
         Then try out the <a href="tutorial.html">Pig Tutorial</a> to get an 
idea of how easy it is to use Pig. 
       </p>
       <p>  
-        When you are ready to start writing your own scripts, read through the <a href="piglatin_users.html">Pig Latin Users Guide</a>
-        and the <a href="piglatin_reference.html">Pig Latin Reference Manual</a> to become familiar with Pig's features. 
+        When you are ready to start writing your own scripts, read through the Pig Latin Reference <a href="piglatin_ref1.html">Manual 1</a>
+        and <a href="piglatin_ref2.html">Manual 2</a> to become familiar with Pig's features. 
         Also review the <a href="cookbook.html">Pig Cookbook</a> to learn how to tweak your code for optimal performance.
       </p>
       <p>

Added: hadoop/pig/branches/load-store-redesign/src/docs/src/documentation/content/xdocs/piglatin_ref1.xml
URL: http://svn.apache.org/viewvc/hadoop/pig/branches/load-store-redesign/src/docs/src/documentation/content/xdocs/piglatin_ref1.xml?rev=903018&view=auto
==============================================================================
--- hadoop/pig/branches/load-store-redesign/src/docs/src/documentation/content/xdocs/piglatin_ref1.xml (added)
+++ hadoop/pig/branches/load-store-redesign/src/docs/src/documentation/content/xdocs/piglatin_ref1.xml Tue Jan 26 00:02:23 2010
@@ -0,0 +1,784 @@
+<?xml version="1.0" encoding="UTF-8"?>
+
+<!--  Copyright 2002-2004 The Apache Software Foundation
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+
+<!DOCTYPE document PUBLIC "-//APACHE//DTD Documentation V2.0//EN"
+          "http://forrest.apache.org/dtd/document-v20.dtd";>
+  
+  <!-- BEGIN DOCUMENT-->
+  
+<document>
+<header>
+<title>Pig Latin Reference Manual 1</title>
+</header>
+<body>
+ 
+ 
+ <!-- ABOUT PIG LATIN -->
+   <section>
+   <title>Overview</title>
+   <p>Use this manual together with <a href="piglatin_ref2.html">Pig Latin 
Reference Manual 2</a>. </p>
+   <p>Also, be sure to review the information in the <a 
href="cookbook.html">Pig Cookbook</a>. </p>
+    </section>
+    
+   <!-- PIG LATIN STATEMENTS -->
+   <section>
+       <title>Pig Latin Statements</title>     
+   <p>A Pig Latin statement is an operator that takes a <a 
href="piglatin_ref2.html#Relations%2C+Bags%2C+Tuples%2C+Fields">relation</a> 
+   as input and produces another relation as output. 
+   (This definition applies to all Pig Latin operators except LOAD and STORE 
which read data from and write data to the file system.) 
+   Pig Latin statements can span multiple lines and must end with a semi-colon 
( ; ). 
+   Pig Latin statements are generally organized in the following manner: </p>
+   <ol>
+      <li>
+         <p>A LOAD statement reads data from the file system. </p>
+      </li>
+      <li>
+         <p>A series of "transformation" statements process the data. </p>
+      </li>
+      <li>
+         <p>A STORE statement writes output to the file system; or, a DUMP 
statement displays output to the screen.</p>
+      </li>
+   </ol>
+  
+   <section>
+   <title>Running Pig Latin </title>
+   <p>You can execute Pig Latin statements interactively or in batch mode 
using Pig scripts (see the <a href="piglatin_ref2.html#exec">exec</a> and <a 
href="piglatin_ref2.html#run">run</a> commands).</p>
+   
+   <p>Grunt Shell, Interactive or Batch Mode</p>
+   <source>
+$ pig 
+... - Connecting to ...
+grunt> A = load 'data';
+grunt> B = ... ;
+or
+grunt> exec myscript.pig;
+or
+grunt> run myscript.pig;
+</source> 
+
+<p>Command Line, Batch Mode</p>
+   <source>
+$ pig myscript.pig
+</source> 
+
+<p></p>
+   <p><em>In general</em>, Pig processes Pig Latin statements as follows:</p>
+   <ol>
+      <li>
+         <p>First, Pig validates the syntax and semantics of all 
statements.</p>
+      </li>
+      <li>
+         <p>Next, if Pig encounters a DUMP or STORE, Pig will execute the 
statements.</p>
+      </li>
+   </ol>  
+   <p></p>
+   <p>In this example Pig will validate, but not execute, the LOAD and FOREACH 
statements.</p>
+
+<source>
+A = LOAD 'student' USING PigStorage() AS (name:chararray, age:int, gpa:float);
+B = FOREACH A GENERATE name;
+</source>   
+
+   <p>In this example, Pig will validate and then execute the LOAD, FOREACH, 
and DUMP statements.</p>
+   
+<source>
+A = LOAD 'student' USING PigStorage() AS (name:chararray, age:int, gpa:float);
+B = FOREACH A GENERATE name;
+DUMP B;
+(John)
+(Mary)
+(Bill)
+(Joe)
+</source>   
+   
+   <p> </p>
+   <p>Note: See Multi-Query Execution for more information on how Pig Latin 
statements are processed.</p> 
+   </section>
+   
+   <section>
+   <title>Retrieving Pig Latin Results</title>
+   <p>Pig Latin includes operators you can use to retrieve the results of your 
Pig Latin statements: </p>
+   <ol>
+      <li>
+         <p>Use the DUMP operator to display results to a screen. </p>
+      </li>
+      <li>
+         <p>Use the STORE operator to write results to a file on the file 
system.</p>
+      </li>
+   </ol>
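+   <p>A minimal sketch of both operators, reusing the student data from the earlier examples ('myoutput' is a hypothetical path):</p>
+<source>
+A = LOAD 'student' USING PigStorage() AS (name:chararray, age:int, gpa:float);
+B = FOREACH A GENERATE name;
+DUMP B;                   -- display the results on the screen
+STORE B INTO 'myoutput';  -- write the results to a file on the file system
+</source>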
+   </section>   
+   
+   
+   <section>
+   <title>Debugging Pig Latin</title>
+   <p>Pig Latin includes operators that can help you debug your Pig Latin 
statements:</p>
+   <ol>
+      <li>
+         <p>Use the DESCRIBE operator to review the schema of a relation.</p>
+      </li>
+      <li>
+         <p>Use the EXPLAIN operator to view the logical, physical, or map 
reduce execution plans to compute a relation.</p>
+      </li>
+      <li>
+         <p>Use the ILLUSTRATE operator to view the step-by-step execution of 
a series of statements.</p>
+      </li>
+   </ol>
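+   <p>A minimal sketch of these operators, again reusing the student data (the output each operator prints is omitted here):</p>
+<source>
+A = LOAD 'student' USING PigStorage() AS (name:chararray, age:int, gpa:float);
+B = FOREACH A GENERATE name;
+DESCRIBE B;    -- review the schema of relation B
+EXPLAIN B;     -- view the execution plans used to compute B
+ILLUSTRATE B;  -- view a step-by-step execution on a sample of the data
+</source>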
+   </section>
+   
+   <section>
+   <title>Working with Data</title>
+   <p>Pig Latin allows you to work with data in many ways. In general, and as 
a starting point:</p>
+   <ol>
+      <li>
+         <p>Use the FILTER operator to work with tuples or rows of data. Use 
the FOREACH operator to work with columns of data.</p>
+      </li>
+      <li>
+         <p>Use the GROUP operator to group data in a single relation. Use the 
COGROUP and JOIN operators to group or join data in two or more relations.</p>
+      </li>
+      <li>
+         <p>Use the UNION operator to merge the contents of two or more 
relations. Use the SPLIT operator to partition the contents of a relation into 
multiple relations.</p>
+      </li>
+   </ol>
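+   <p>A sketch showing several of these operators together (the files and field names are hypothetical):</p>
+<source>
+A = LOAD 'data1' AS (f1:int, f2:int);
+B = LOAD 'data2' AS (g1:int, g2:int);
+C = FILTER A BY f1 > 0;                  -- work with rows of data
+D = FOREACH C GENERATE f1;               -- work with columns of data
+E = GROUP A BY f1;                       -- group data in a single relation
+F = JOIN A BY f1, B BY g1;               -- join data in two relations
+G = UNION A, B;                          -- merge the contents of two relations
+SPLIT A INTO H IF f1 > 0, I IF f1 == 0;  -- partition a relation
+</source>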
+   </section>
+ 
+   <section>
+   <title>Using Comments in Scripts</title>
+   <p>If you place Pig Latin statements in a script, the script can include 
comments. </p>
+   <ol>
+      <li>
+         <p>For multi-line comments use /* …. */</p>
+      </li>
+      <li>
+         <p>For single line comments use --</p>
+      </li>
+   </ol>
+<source>
+/* myscript.pig
+My script includes three simple Pig Latin Statements.
+*/
+
+A = LOAD 'student' USING PigStorage() AS (name:chararray, age:int, gpa:float); 
-- load statement
+B = FOREACH A GENERATE name;  -- foreach statement
+DUMP B;  --dump statement
+</source>   
+</section>
+
+ <section>
+   <title>Case Sensitivity</title>
+   <p>The names (aliases) of relations and fields are case sensitive. The 
names of Pig Latin functions are case sensitive. 
+   The names of parameters (see Parameter Substitution) and all other Pig 
Latin keywords are case insensitive.</p>
+   <p>In the example below, note the following:</p>
+   <ol>
+      <li>
+         <p>The names (aliases) of relations A, B, and C are case 
sensitive.</p>
+      </li>
+      <li>
+         <p>The names (aliases) of fields f1, f2, and f3 are case 
sensitive.</p>
+      </li>
+      <li>
+         <p>Function names PigStorage and COUNT are case sensitive.</p>
+      </li>
+      <li>
+         <p>Keywords LOAD, USING, AS, GROUP, BY, FOREACH, GENERATE, and DUMP 
are case insensitive. 
+         They can also be written as load, using, as, group, by, etc.</p>
+      </li>
+      <li>
+         <p>In the FOREACH statement, the field in relation B is referred to 
by positional notation ($0).</p>
+      </li>
+   </ol>
+   <p/>
+
+<source>
+grunt> A = LOAD 'data' USING PigStorage() AS (f1:int, f2:int, f3:int);
+grunt> B = GROUP A BY f1;
+grunt> C = FOREACH B GENERATE COUNT ($0);
+grunt> DUMP C;
+</source>
+</section>    
+</section>  
+<!-- END PIG LATIN STATEMENTS -->
+
+ 
+
+<!-- MULTI-QUERY EXECUTION-->
+<section>
+<title>Multi-Query Execution</title>
+<p>With multi-query execution Pig processes an entire script or a batch of 
statements at once.</p>
+
+<section>
+       <title>Turning it On or Off</title>     
+       <p>Multi-query execution is turned on by default. 
+       To turn it off and revert to Pig's "execute-on-dump/store" behavior, 
use the "-M" or "-no_multiquery" options. </p>
+       <p>To run script "myscript.pig" without the optimization, execute Pig 
as follows: </p>
+<source>
+$ pig -M myscript.pig
+or
+$ pig -no_multiquery myscript.pig
+</source>
+</section>
+
+<section>
+<title>How it Works</title>
+<p>Multi-query execution introduces some changes:</p>
+
+<ol>
+<li>
+<p>For batch mode execution, the entire script is first parsed to determine if 
intermediate tasks 
+can be combined to reduce the overall amount of work that needs to be done; 
execution starts only after the parsing is completed 
+(see the <a href="piglatin_ref2.html#EXPLAIN">EXPLAIN</a> operator and the <a 
href="piglatin_ref2.html#exec">exec</a> and <a 
href="piglatin_ref2.html#run">run</a> commands). </p>
+
+</li>
+<li>
+<p>Two run scenarios are optimized, as explained below: explicit and implicit 
splits, and storing intermediate results.</p>
+</li>
+</ol>
+
+<section>
+       <title>Explicit and Implicit Splits</title>
+<p>There might be cases in which you want different processing on separate 
parts of the same data stream.</p>
+<p>Example 1:</p>
+<source>
+A = LOAD ...
+...
+SPLIT A' INTO B IF ..., C IF ...
+...
+STORE B' ...
+STORE C' ...
+</source>
+<p>Example 2:</p>
+<source>
+A = LOAD ...
+...
+B = FILTER A' ...
+C = FILTER A' ...
+...
+STORE B' ...
+STORE C' ...
+</source>
+<p>In prior Pig releases, Example 1 would dump A' to disk and then start jobs for B' and C'. 
+Example 2 would execute all the dependencies of B' and store it, and then execute all the dependencies of C' and store it. 
+The two are equivalent, but the performance will be different. </p>
+<p>Here's what the multi-query execution does to increase the performance: </p>
+       <ol>
+               <li><p>For Example 2, adds an implicit split to transform the 
query to Example 1. 
+               This eliminates the processing of A' multiple times.</p></li>
+               <li><p>Makes the split non-blocking and allows processing to 
continue. 
+               This helps reduce the amount of data that has to be stored 
right at the split.  </p></li>
+               <li><p>Allows multiple outputs from a job. This way some 
results can be stored as a side-effect of the main job. 
+               This is also necessary to make the previous item work.  
</p></li>
+               <li><p>Allows multiple split branches to be carried on to the 
combiner/reducer. 
+               This reduces the amount of IO again in the case where multiple 
branches in the split can benefit from a combiner run. </p></li>
+       </ol>
+</section>
+
+<section>
+       <title>Storing Intermediate Results</title>
+<p>Sometimes it is necessary to store intermediate results. </p>
+
+<source>
+A = LOAD ...
+...
+STORE A'
+...
+STORE A''
+</source>
+
+<p>If the script doesn't re-load A' for the processing of A'', the steps above A' will be duplicated. 
+This is a special case of Example 2 above, so the same steps are recommended. 
+With multi-query execution, the script will process A and dump A' as a side-effect.</p>
+</section>
+</section>
+
+<section>
+       <title>Store vs. Dump</title>
+       <p>With multi-query execution, you want to use <a 
href="piglatin_ref2.html#STORE">STORE</a> to save (persist) your results. 
+       You do not want to use <a href="piglatin_ref2.html#DUMP">DUMP</a> as it 
will disable multi-query execution and is likely to slow down execution. (If 
you have included DUMP statements in your scripts for debugging purposes, you 
should remove them.) </p>
+       
+       <p>DUMP Example: In this script, because the DUMP command is 
interactive, the multi-query execution will be disabled and two separate jobs 
will be created to execute this script. The first job will execute A > B > DUMP 
while the second job will execute A > B > C > STORE.</p>
+       
+<source>
+A = LOAD 'input' AS (x, y, z);
+B = FILTER A BY x > 5;
+DUMP B;
+C = FOREACH B GENERATE y, z;
+STORE C INTO 'output';
+</source>
+       
+       <p>STORE Example: In this script, multi-query optimization will kick in 
allowing the entire script to be executed as a single job. Two outputs are 
produced: output1 and output2.</p>
+       
+<source>
+A = LOAD 'input' AS (x, y, z);
+B = FILTER A BY x > 5;
+STORE B INTO 'output1';
+C = FOREACH B GENERATE y, z;
+STORE C INTO 'output2';    
+</source>
+
+</section>
+<section>
+       <title>Error Handling</title>
+       <p>With multi-query execution Pig processes an entire script or a batch 
of statements at once. 
+       By default Pig tries to run all the jobs that result from that, 
regardless of whether some jobs fail during execution. 
+       To check which jobs have succeeded or failed use one of these options. 
</p>
+       
+       <p>First, Pig logs all successful and failed store commands. Store 
commands are identified by output path. 
+       At the end of execution a summary line indicates success, partial 
failure or failure of all store commands. </p>        
+       
+       <p>Second, Pig returns a different return code upon completion for these scenarios:</p>
+       <ol>
+               <li><p>Return code 0: All jobs succeeded</p></li>
+               <li><p>Return code 1: <em>Used for retrievable errors</em> 
</p></li>
+               <li><p>Return code 2: All jobs have failed </p></li>
+               <li><p>Return code 3: Some jobs have failed  </p></li>
+       </ol>
+       <p></p>
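+       <p>For example, a shell wrapper can inspect the return code after the run completes (myscript.pig is a hypothetical script):</p>
+<source>
+$ pig myscript.pig
+$ echo $?
+</source>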
+       <p>In some cases it might be desirable to fail the entire script upon 
detecting the first failed job. 
+       This can be achieved with the "-F" or "-stop_on_failure" command line 
flag. 
+       If used, Pig will stop execution when the first failed job is detected 
and discontinue further processing. 
+       This also means that file commands that come after a failed store in 
the script will not be executed (this can be used to create "done" files). </p>
+       
+       <p>This is how the flag is used: </p>
+<source>
+$ pig -F myscript.pig
+or
+$ pig -stop_on_failure myscript.pig
+</source>
+
+</section>
+
+<section>
+       <title>Backward Compatibility</title>
+       
+       <p>Most existing Pig scripts will produce the same result with or 
without the multi-query execution. 
+       There are cases though where this is not true. Path names and schemes 
are discussed here.</p>
+       
+       <p>Any script is parsed in its entirety before it is sent to execution. Since the current directory can change 
+       throughout the script, any path used in a LOAD or STORE statement is translated to a fully qualified and absolute path.</p>
+               
+       <p>In map-reduce mode, the following script will load from 
"hdfs://&lt;host&gt;:&lt;port&gt;/data1" and store into 
"hdfs://&lt;host&gt;:&lt;port&gt;/tmp/out1". </p>
+<source>
+cd /;
+A = LOAD 'data1';
+cd tmp;
+STORE A INTO 'out1';
+</source>
+
+       <p>These expanded paths will be passed to any LoadFunc or Slicer 
implementation. 
+       In some cases this can cause problems, especially when a 
LoadFunc/Slicer is not used to read from a dfs file or path 
+       (for example, loading from an SQL database). </p>
+       
+       <p>Solutions are to either: </p>
+       <ol>
+               <li><p>Specify "-M" or "-no_multiquery" to revert to the old 
names</p></li>
+               <li><p>Specify a custom scheme for the LoadFunc/Slicer </p></li>
+       </ol>   
+       
+       <p>Arguments used in a LOAD statement that have a scheme other than 
"hdfs" or "file" will not be expanded and passed to the LoadFunc/Slicer 
unchanged.</p>
+       <p>In the SQL case, the SQLLoader function is invoked with 
"sql://mytable". </p>
+
+<source>
+A = LOAD "sql://mytable" USING SQLLoader();
+</source>
+</section>
+
+<section>
+       <title>Implicit Dependencies</title>
+<p>If a script has dependencies on the execution order outside of what Pig 
knows about, execution may fail. </p>
+
+
+<section>
+       <title>Example</title>
+<p>In this script, MYUDF might try to read from out1, a file that A was just 
stored into. 
+However, Pig does not know that MYUDF depends on the out1 file and might 
submit the jobs 
+producing the out2 and out1 files at the same time.</p>
+<source>
+...
+STORE A INTO 'out1';
+B = LOAD 'data2';
+C = FOREACH B GENERATE MYUDF($0,'out1');
+STORE C INTO 'out2';
+</source>
+
+<p>To make the script work (to ensure that the right execution order is 
enforced) add the exec statement. 
+The exec statement will trigger the execution of the statements that produce 
the out1 file. </p>
+
+<source>
+...
+STORE A INTO 'out1';
+EXEC;
+B = LOAD 'data2';
+C = FOREACH B GENERATE MYUDF($0,'out1');
+STORE C INTO 'out2';
+</source>
+</section>
+
+<section>
+       <title>Example</title>
+<p>In this script, the STORE/LOAD operators have different file paths; 
however, the LOAD operator depends on the STORE operator.</p>
+<source>
+A = LOAD '/user/xxx/firstinput' USING PigStorage();
+B = group ....
+C = .... aggregation function
+STORE C INTO '/user/vxj/firstinputtempresult/days1';
+..
+Atab = LOAD '/user/xxx/secondinput' USING  PigStorage();
+Btab = group ....
+Ctab = .... aggregation function
+STORE Ctab INTO '/user/vxj/secondinputtempresult/days1';
+..
+E = LOAD '/user/vxj/firstinputtempresult/' USING  PigStorage();
+F = group ....
+G = .... aggregation function
+STORE G INTO '/user/vxj/finalresult1';
+
+Etab =LOAD '/user/vxj/secondinputtempresult/' USING  PigStorage();
+Ftab = group ....
+Gtab = .... aggregation function
+STORE Gtab INTO '/user/vxj/finalresult2';
+</source>
+
+<p>To make the script work, add the exec statement.  </p>
+
+<source>
+A = LOAD '/user/xxx/firstinput' USING PigStorage();
+B = group ....
+C = .... aggregation function
+STORE C INTO '/user/vxj/firstinputtempresult/days1';
+..
+Atab = LOAD '/user/xxx/secondinput' USING  PigStorage();
+Btab = group ....
+Ctab = .... aggregation function
+STORE Ctab INTO '/user/vxj/secondinputtempresult/days1';
+
+EXEC;
+
+E = LOAD '/user/vxj/firstinputtempresult/' USING  PigStorage();
+F = group ....
+G = .... aggregation function
+STORE G INTO '/user/vxj/finalresult1';
+..
+Etab =LOAD '/user/vxj/secondinputtempresult/' USING  PigStorage();
+Ftab = group ....
+Gtab = .... aggregation function
+STORE Gtab INTO '/user/vxj/finalresult2';
+</source>
+
+
+</section>
+</section>
+
+</section>
+<!-- END MULTI-QUERY EXECUTION-->
+
+
+
+<!-- SPECIALIZED JOINS-->
+<section>
+<title>Specialized Joins</title>
+<p>
+Pig Latin includes three "specialized" joins: replicated joins, skewed joins, 
and merge joins. </p>
+<ul>
+<li>Replicated, skewed, and merge joins can be performed using <a 
href="piglatin_ref2.html#JOIN+%28inner%29">inner joins</a>.</li>
+<li>Replicated and skewed joins can also be performed using <a 
href="piglatin_ref2.html#JOIN+%28outer%29">outer joins</a>.</li>
+</ul>
+
+<!-- FRAGMENT REPLICATE JOINS-->
+<section>
+<title>Replicated Joins</title>
+<p>Fragment replicate join is a special type of join that works well if one or 
more relations are small enough to fit into main memory. 
+In such cases, Pig can perform a very efficient join because all of the hadoop 
work is done on the map side. In this type of join the 
+large relation is followed by one or more small relations. The small relations 
must be small enough to fit into main memory; if they 
+don't, the process fails and an error is generated.</p>
+ 
+<section>
+<title>Usage</title>
+<p>Perform a replicated join with the USING clause (see <a 
href="piglatin_ref2.html#JOIN+%28inner%29">inner joins</a> and <a 
href="piglatin_ref2.html#JOIN+%28outer%29">outer joins</a>).
+In this example, a large relation is joined with two smaller relations. Note that the large relation comes first, followed by the smaller relations. 
+All of the small relations together must fit into main memory; otherwise, an error is generated. </p>
+<source>
+big = LOAD 'big_data' AS (b1,b2,b3);
+
+tiny = LOAD 'tiny_data' AS (t1,t2,t3);
+
+mini = LOAD 'mini_data' AS (m1,m2,m3);
+
+C = JOIN big BY b1, tiny BY t1, mini BY m1 USING "replicated";
+</source>
+</section>
+
+<section>
+<title>Conditions</title>
+<p>Fragment replicate joins are experimental; we don't have a strong sense of how small the small relation must be to fit 
+into memory. In our tests with a simple query that involves just a JOIN, a relation of up to 100 MB can be used if the process overall 
+gets 1 GB of memory. Please share your observations and experience with us.</p>
+</section>
+</section>
+<!-- END FRAGMENT REPLICATE JOINS-->
+
+
+<!-- SKEWED JOINS-->
+<section>
+<title>Skewed Joins</title>
+
+<p>
+Parallel joins are vulnerable to the presence of skew in the underlying data. 
+If the underlying data is sufficiently skewed, load imbalances will swamp any 
of the parallelism gains. 
+In order to counteract this problem, skewed join computes a histogram of the 
key space and uses this 
+data to allocate reducers for a given key. Skewed join does not place a 
restriction on the size of the input keys. 
+It accomplishes this by splitting one of the inputs on the join predicate and streaming the other input. 
+</p>
+
+<p>
+Skewed join can be used when the underlying data is sufficiently skewed and 
you need a finer 
+control over the allocation of reducers to counteract the skew. It should also 
be used when the data 
+associated with a given key is too large to fit in memory.
+</p>
+
+<section>
+<title>Usage</title>
+<p>Perform a skewed join with the USING clause (see <a 
href="piglatin_ref2.html#JOIN+%28inner%29">inner joins</a> and <a 
href="piglatin_ref2.html#JOIN+%28outer%29">outer joins</a>). </p>
+<source>
+big = LOAD 'big_data' AS (b1,b2,b3);
+massive = LOAD 'massive_data' AS (m1,m2,m3);
+C = JOIN big BY b1, massive BY m1 USING "skewed";
+</source>
+</section>
+
+<section>
+<title>Conditions</title>
+<p>
+Skewed join will only work under these conditions: 
+</p>
+<ul>
+<li>Skewed join works with two-table inner join. Currently we do not support 
more than two tables for skewed join. 
+Specifying three-way (or more) joins will fail validation. For such joins, we 
rely on you to break them up into two-way joins.</li>
+<li>The pig.skewedjoin.reduce.memusage Java parameter specifies the fraction of heap available for the 
+reducer to perform the join. A low fraction forces Pig to use more reducers but increases 
+copying cost. We have seen good performance when we set this value 
+in the range 0.1 - 0.4. However, note that this is hardly an accurate range. Its value 
+depends on the amount of heap available for the operation, the number of columns 
+in the input, and the skew. An appropriate value is best obtained by conducting experiments to achieve 
+good performance. The default value is 0.5 (a sketch of setting this parameter follows this list). </li>
+</ul>
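+<p>As a sketch, one way to set this parameter is on the command line, assuming your pig wrapper script passes -D Java properties through to the JVM (the value shown is only an example):</p>
+<source>
+$ pig -Dpig.skewedjoin.reduce.memusage=0.3 myscript.pig
+</source>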
+</section>
+</section><!-- END SKEWED JOINS-->
+
+
+<!-- MERGE JOIN-->
+<section>
+<title>Merge Joins</title>
+
+<p>
+Often user data is stored such that both inputs are already sorted on the join 
key. 
+In this case, it is possible to join the data in the map phase of a MapReduce 
job. 
+This provides a significant performance improvement compared to passing all of 
the data through 
+unneeded sort and shuffle phases. 
+</p>
+
+<p>
+Pig has implemented a merge join algorithm, or sort-merge join, although in 
this case the sort is already 
+assumed to have been done (see the Conditions, below). 
+
+Pig implements the merge join algorithm by selecting the left input of the 
join to be the input file for the map phase, 
+and the right input of the join to be the side file. It then samples records 
from the right input to build an
+ index that contains, for each sampled record, the key(s), the file name, and the offset in the file at which the record 
+ begins. This sampling is done in an initial map-only job. A second 
MapReduce job is then initiated, 
+ with the left input as its input. Each map uses the index to seek to the 
appropriate record in the right 
+ input and begin doing the join. 
+</p>
+
+<section>
+<title>Usage</title>
+<p>Perform a merge join with the USING clause (see <a 
href="piglatin_ref2.html#JOIN+%28inner%29">inner joins</a>).</p>
+<source>
+C = JOIN A BY a1, B BY b1 USING "merge";
+</source>
+</section>
+
+<section>
+<title>Conditions</title>
+<p>
+Merge join will only work under these conditions: 
+</p>
+
+<ul>
+<li>Both inputs are sorted in ascending order of join keys. If an input consists of many files, there should be 
+a total ordering across the files in ascending order of file name. So 
for example if one of the inputs to the 
+join is a directory called input1 with files a and b under it, the data should 
be sorted in ascending order of join 
+key when read starting at a and ending in b. Likewise if an input directory 
has part files part-00000, part-00001, 
+part-00002 and part-00003, the data should be sorted if the files are read in 
the sequence part-00000, part-00001, 
+part-00002 and part-00003. </li>
+<li>The merge join only has two inputs </li>
+<li>The loadfunc for the right input of the join should implement the 
SamplableLoader interface (PigStorage does 
+implement the SamplableLoader interface). </li>
+<li>Only inner join will be supported </li>
+
+<li>Between the load of the sorted input and the merge join statement there can only be filter statements and 
+foreach statements, where the foreach statement should meet the following conditions: 
+<ul>
+<li>There should be no UDFs in the foreach statement </li>
+<li>The foreach statement should not change the position of the join keys </li>
+<li>There should be no transformation on the join keys that changes the sort order </li>
+</ul>
+</li>
+
+</ul>
+<p></p>
+<p>
+For optimal performance, each part file of the left (sorted) input of the join should have a size of at least 
+1 hdfs block size (for example, if the hdfs block size is 128 MB, each part file should be at least 128 MB). 
+If the total input size (including all part files) is greater than the block size, 
then the part files should be uniform in size 
+(without large skews in sizes). The main idea is to eliminate skew in the 
amount of input the final map 
+job performing the merge-join will process. 
+</p>
+
+<p>
+In local mode, merge join will revert to regular join.
+</p>
+</section>
+</section><!-- END MERGE JOIN -->
+
+
+</section>
+<!-- END SPECIALIZED JOINS-->
+ 
+ <!-- OPTIMIZATION RULES -->
+<section>
+<title>Optimization Rules</title>
+<p>Pig supports various optimization rules. By default, optimization and all optimization rules are turned on. 
+To turn off optimization, use:</p>
+
+<source>
+pig -optimizer_off [opt_rule | all ]
+</source>
+
+<p>Note that some rules are mandatory and cannot be turned off.</p>
+
+<section>
+<title>ImplicitSplitInserter</title>
+<p>Status: Mandatory</p>
+<p>
+<a href="piglatin_ref2.html#SPLIT">SPLIT</a> is the only operator that models 
multiple outputs in Pig. 
+To ease the process of building logical plans, all operators are allowed to 
have multiple outputs. As part of the 
+optimization, all non-split operators that have multiple outputs are altered 
to have a SPLIT operator as the output 
+and the outputs of the operator are then made outputs of the SPLIT operator. 
An example will illustrate the point. 
+Here, a split will be inserted after the LOAD and the split outputs will be 
connected to the FILTER (b) and the COGROUP (c).
+</p>
+<source>
+A = LOAD 'input';
+B = FILTER A BY $1 == 1;
+C = COGROUP A BY $0, B BY $0;
+</source>
+</section>
+
+<section>
+<title>TypeCastInserter</title>
+<p>Status: Mandatory</p>
+<p>
+If you specify a <a href="piglatin_ref2.html#Schemas">schema</a> with the 
+<a href="piglatin_ref2.html#LOAD">LOAD</a> statement, the optimizer will 
perform a pre-fix projection of the columns 
+and <a href="piglatin_ref2.html#Cast+Operators">cast</a> the columns to the 
appropriate types. An example will illustrate the point. 
+The LOAD statement (a) has a schema associated with it. The optimizer will 
insert a FOREACH operator that will project columns 0, 1 and 2 
+and also cast them to chararray, int and float respectively. 
+</p>
+<source>
+A = LOAD 'input' AS (name: chararray, age: int, gpa: float);
+B = FILTER A BY $1 == 1;
+C = GROUP A BY $0;
+</source>
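+<p>Conceptually, the inserted FOREACH is equivalent to the following hand-written statement (a sketch of the effect, not the optimizer's literal output):</p>
+<source>
+A1 = FOREACH A GENERATE (chararray)$0, (int)$1, (float)$2;
+</source>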
+</section>
+
+<section>
+<title>StreamOptimizer</title>
+<p>
+Optimize when <a href="piglatin_ref2.html#LOAD">LOAD</a> precedes <a 
href="piglatin_ref2.html#STREAM">STREAM</a> 
+and the loader class is the same as the serializer for the stream. Similarly, 
optimize when STREAM is followed by 
+<a href="piglatin_ref2.html#STORE">STORE</a> and the deserializer class is 
same as the storage class. 
+For both of these cases the optimization is to replace the loader/serializer 
with BinaryStorage which just moves bytes 
+around and to replace the storer/deserializer with BinaryStorage.
+</p>
+
+</section>
+
+<section>
+<title>OpLimitOptimizer</title>
+<p>
+The objective of this rule is to push the <a 
href="piglatin_ref2.html#LIMIT">LIMIT</a> operator up the data flow graph 
+(or down the tree for database folks). In addition, for top-k (ORDER BY 
followed by a LIMIT) the LIMIT is pushed into the ORDER BY.
+</p>
+<source>
+A = LOAD 'input';
+B = ORDER A BY $0;
+C = LIMIT B 10;
+</source>
+</section>
+
+<section>
+<title>PushUpFilters</title>
+<p>
+The objective of this rule is to push the <a 
href="piglatin_ref2.html#FILTER">FILTER</a> operators up the data flow graph. 
+As a result, the number of records that flow through the pipeline is reduced. 
+</p>
+<source>
+A = LOAD 'input';
+B = GROUP A BY $0;
+C = FILTER B BY $0 &lt; 10;
+</source>
+</section>
+
+<section>
+<title>PushDownExplodes</title>
+<p>
+The objective of this rule is to reduce the number of records that flow 
through the pipeline by moving 
+<a href="piglatin_ref2.html#FOREACH">FOREACH</a> operators with a 
+<a href="piglatin_ref2.html#Flatten+Operator">FLATTEN</a> down the data flow 
graph. 
+In the example shown below, it would be more efficient to move the foreach 
after the join to reduce the cost of the join operation.
+</p>
+<source>
+A = LOAD 'input' AS (a, b, c);
+B = LOAD 'input2' AS (x, y, z);
+C = FOREACH A GENERATE FLATTEN($0), b, c;
+D = JOIN C BY $1, B BY $1;
+</source>
+</section>
+</section> <!-- END OPTIMIZATION RULES -->
+
+ <!-- MEMORY MANAGEMENT -->
+<section>
+<title>Memory Management</title>
+<p>For Pig 0.6.0 we changed how Pig decides when to spill bags to disk. In the past, Pig tried to figure out when an application was getting close to the memory limit and then spill at that time. However, because Java does not include an accurate way to determine when to spill, Pig often ran out of memory. </p>
+
+<p>In the current version, we allocate a fixed amount of memory to store bags and spill to disk as soon as the memory limit is reached. This is very similar to how Hadoop decides when to spill data accumulated by the combiner. </p>
+
+<p>The amount of memory allocated to bags is determined by 
pig.cachedbag.memusage; the default is set to 10% of available memory. Note 
that this memory is shared across all large bags used by the application.</p>
+
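+<p>As a sketch, pig.cachedbag.memusage can be overridden like other Pig properties, for example via a -D Java property if your pig wrapper script passes it through to the JVM (the value shown is only an example):</p>
+<source>
+$ pig -Dpig.cachedbag.memusage=0.2 myscript.pig
+</source>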
+</section> 
+<!-- END MEMORY MANAGEMENT  -->
+
+ 
+  <!-- ZEBRA INTEGRATION -->
+<section>
+<title>Zebra Integration</title>
+<p>For information about how to integrate Zebra with your Pig scripts, see <a 
href="zebra_pig.html">Zebra and Pig</a>.</p>
+ </section> 
+<!-- END ZEBRA INTEGRATION  -->
+ 
+ 
+ 
+ </body>
+ </document>
+  
+   

