Added: hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/pig/TestTableMergeJoinInteger.java
URL: http://svn.apache.org/viewvc/hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/pig/TestTableMergeJoinInteger.java?rev=833166&view=auto
==============================================================================
--- hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/pig/TestTableMergeJoinInteger.java (added)
+++ hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/pig/TestTableMergeJoinInteger.java Thu Nov  5 21:02:57 2009
@@ -0,0 +1,222 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.zebra.pig;
+
+import java.io.ByteArrayOutputStream;
+import java.io.File;
+import java.io.IOException;
+import java.io.PrintWriter;
+import java.util.Iterator;
+import java.util.StringTokenizer;
+
+import junit.framework.Assert;
+import junit.framework.TestCase;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.io.BytesWritable;
+import org.apache.hadoop.zebra.io.BasicTable;
+import org.apache.hadoop.zebra.io.TableInserter;
+import org.apache.hadoop.zebra.pig.TableStorer;
+import org.apache.hadoop.zebra.schema.Schema;
+import org.apache.hadoop.zebra.types.TypesUtils;
+import org.apache.pig.ExecType;
+import org.apache.pig.PigServer;
+import org.apache.pig.backend.executionengine.ExecException;
+import org.apache.pig.data.Tuple;
+import org.apache.pig.test.MiniCluster;
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+/**
+ * Note:
+ * 
+ * Make sure you add build/pig-0.1.0-dev-core.jar to the classpath of the
+ * app/debug configuration when running this from inside Eclipse.
+ * 
+ */
+public class TestTableMergeJoinInteger {
+  protected static ExecType execType = ExecType.MAPREDUCE;
+  private static MiniCluster cluster;
+  protected static PigServer pigServer;
+  private static Path pathTable;
+
+  @BeforeClass
+  public static void setUp() throws Exception {
+    if (System.getProperty("hadoop.log.dir") == null) {
+      String base = new File(".").getPath(); // getAbsolutePath();
+      System
+          .setProperty("hadoop.log.dir", new Path(base).toString() + "./logs");
+    }
+
+    if (execType == ExecType.MAPREDUCE) {
+      cluster = MiniCluster.buildCluster();
+      pigServer = new PigServer(ExecType.MAPREDUCE, cluster.getProperties());
+    } else {
+      pigServer = new PigServer(ExecType.LOCAL);
+    }
+
+    Configuration conf = new Configuration();
+    FileSystem fs = cluster.getFileSystem();
+    Path pathWorking = fs.getWorkingDirectory();
+    pathTable = new Path(pathWorking, "TestTableStorer");
+    System.out.println("pathTable =" + pathTable);
+    BasicTable.Writer writer = new BasicTable.Writer(pathTable,
+        "SF_a:int,SF_b:string,SF_c,SF_d,SF_e,SF_f,SF_g",
+        "[SF_a, SF_b, SF_c]; [SF_e, SF_f, SF_g]", conf);
+    Schema schema = writer.getSchema();
+    System.out.println("typeName" + schema.getColumn("SF_a").getType().pigDataType());
+    Tuple tuple = TypesUtils.createTuple(schema);
+
+    final int numsBatch = 10;
+    final int numsInserters = 1;
+    TableInserter[] inserters = new TableInserter[numsInserters];
+    for (int i = 0; i < numsInserters; i++) {
+      inserters[i] = writer.getInserter("ins" + i, false);
+    }
+
+    for (int b = 0; b < numsBatch; b++) {
+      for (int i = 0; i < numsInserters; i++) {
+        TypesUtils.resetTuple(tuple);
+        for (int k = 0; k < tuple.size(); ++k) {
+          try {
+            if (k == 0) {
+              tuple.set(0, k + b);
+            } else {
+              tuple.set(k, b + "_" + i + "" + k);
+            }
+          } catch (ExecException e) {
+            e.printStackTrace();
+          }
+        }
+        inserters[i].insert(new BytesWritable(("key" + i).getBytes()), tuple);
+      }
+    }
+    for (int i = 0; i < numsInserters; i++) {
+      inserters[i].close();
+    }
+    writer.close();
+  }
+
+  @AfterClass
+  public static void tearDown() throws Exception {
+    pigServer.shutdown();
+  }
+
+  /**
+   * Return the name of the routine that called getCurrentMethodName
+   * 
+   */
+  public String getCurrentMethodName() {
+    ByteArrayOutputStream baos = new ByteArrayOutputStream();
+    PrintWriter pw = new PrintWriter(baos);
+    (new Throwable()).printStackTrace(pw);
+    pw.flush();
+    String stackTrace = baos.toString();
+    pw.close();
+
+    StringTokenizer tok = new StringTokenizer(stackTrace, "\n");
+    tok.nextToken(); // 'java.lang.Throwable'
+    tok.nextToken(); // 'at ...getCurrentMethodName'
+    String l = tok.nextToken(); // 'at ...<caller to getCurrentRoutine>'
+    // Parse line 3
+    tok = new StringTokenizer(l.trim(), " <(");
+    String t = tok.nextToken(); // 'at'
+    t = tok.nextToken(); // '...<caller to getCurrentRoutine>'
+    return t;
+  }
+
+  @Test
+  public void testStorer() throws ExecException, IOException {
+    /*
+     * Use pig LOAD to load testing data for store
+     */
+    String query = "records = LOAD '" + pathTable.toString()
+        + "' USING org.apache.hadoop.zebra.pig.TableLoader();";
+    pigServer.registerQuery(query);
+
+
+    String orderby = "srecs = ORDER records BY SF_a;";
+    pigServer.registerQuery(orderby);
+
+    Path newPath = new Path(getCurrentMethodName());
+
+    /*
+     * Table1 creation
+     */
+
+    
+    pigServer
+        .store(
+            "srecs",
+            newPath.toString()+"1",
+            TableStorer.class.getCanonicalName()
+                + "('[SF_a, SF_b, SF_c]; [SF_e]')");
+
+    String query3 = "records1 = LOAD '"
+        + newPath.toString() + "1"
+        + "' USING org.apache.hadoop.zebra.pig.TableLoader('SF_a, SF_b', 'sorted');";
+    pigServer.registerQuery(query3);
+    /*
+     * Table2 creation
+     */
+
+    pigServer
+        .store(
+            "srecs",
+            newPath.toString()+"2",
+            TableStorer.class.getCanonicalName()
+                + "('[SF_a, SF_b, SF_c]; [SF_e]')");
+
+
+    String query4 = "records2 = LOAD '"
+        + newPath.toString() + "2"
+        + "' USING org.apache.hadoop.zebra.pig.TableLoader();";
+    pigServer.registerQuery(query4);
+
+    String join = "joinRecords = JOIN records1 BY SF_a, records2 BY SF_a USING \"merge\";";
+    pigServer.registerQuery(join);
+    // check JOIN content
+    Iterator<Tuple> it3 = pigServer.openIterator("joinRecords");
+    int row = 0;
+    Tuple RowValue3 = null;
+    while (it3.hasNext()) {
+      // Last row value
+      RowValue3 = it3.next();
+      Assert.assertEquals(9, RowValue3.size());
+      row++;
+      if (row == 10) {
+        Assert.assertEquals("9_01", RowValue3.get(1));
+        Assert.assertEquals(9, RowValue3.get(0));
+        Assert.assertEquals("9_06", RowValue3.get(8));
+        Assert.assertEquals("9_05", RowValue3.get(7));
+        Assert.assertEquals("9_04", RowValue3.get(6));
+        Assert.assertEquals("9_03", RowValue3.get(5));
+        Assert.assertEquals("9_02", RowValue3.get(4));
+        Assert.assertEquals("9_01", RowValue3.get(3));
+        Assert.assertEquals(9, RowValue3.get(2));
+      }
+    }
+    Assert.assertEquals(10, row);
+  }
+}
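
For reference, a minimal sketch (not part of the commit) of the merge-join pattern this test exercises: both inputs are stored sorted on the join key, and the 'sorted' hint tells TableLoader the table is key-ordered. Paths and relation names below are illustrative.

import org.apache.pig.ExecType;
import org.apache.pig.PigServer;

public class MergeJoinSketch {
  public static void main(String[] args) throws Exception {
    PigServer pig = new PigServer(ExecType.LOCAL);
    // Load the left input with a projection and the 'sorted' hint; a merge
    // join requires both inputs to be ordered on the join key.
    pig.registerQuery("r1 = LOAD 't1' USING org.apache.hadoop.zebra.pig.TableLoader('SF_a, SF_b', 'sorted');");
    pig.registerQuery("r2 = LOAD 't2' USING org.apache.hadoop.zebra.pig.TableLoader();");
    // Same JOIN ... USING \"merge\" form as in the test above.
    pig.registerQuery("j = JOIN r1 BY SF_a, r2 BY SF_a USING \"merge\";");
    pig.openIterator("j"); // triggers execution
  }
}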

Added: hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/pig/TestTableMergeJoinMultipleColsSort.java
URL: http://svn.apache.org/viewvc/hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/pig/TestTableMergeJoinMultipleColsSort.java?rev=833166&view=auto
==============================================================================
--- hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/pig/TestTableMergeJoinMultipleColsSort.java (added)
+++ hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/pig/TestTableMergeJoinMultipleColsSort.java Thu Nov  5 21:02:57 2009
@@ -0,0 +1,224 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.zebra.pig;
+
+import java.io.ByteArrayOutputStream;
+import java.io.File;
+import java.io.IOException;
+import java.io.PrintWriter;
+import java.util.Iterator;
+import java.util.StringTokenizer;
+
+import junit.framework.Assert;
+import junit.framework.TestCase;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.io.BytesWritable;
+import org.apache.hadoop.zebra.io.BasicTable;
+import org.apache.hadoop.zebra.io.TableInserter;
+import org.apache.hadoop.zebra.pig.TableStorer;
+import org.apache.hadoop.zebra.schema.Schema;
+import org.apache.hadoop.zebra.types.TypesUtils;
+import org.apache.pig.ExecType;
+import org.apache.pig.PigServer;
+import org.apache.pig.backend.executionengine.ExecException;
+import org.apache.pig.data.Tuple;
+import org.apache.pig.test.MiniCluster;
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+/**
+ * Note:
+ * 
+ * Make sure you add build/pig-0.1.0-dev-core.jar to the classpath of the
+ * app/debug configuration when running this from inside Eclipse.
+ * 
+ */
+public class TestTableMergeJoinMultipleColsSort {
+  protected static ExecType execType = ExecType.MAPREDUCE;
+  private static MiniCluster cluster;
+  protected static PigServer pigServer;
+  private static Path pathTable;
+
+  @BeforeClass
+  public static void setUp() throws Exception {
+    if (System.getProperty("hadoop.log.dir") == null) {
+      String base = new File(".").getPath(); // getAbsolutePath();
+      System
+          .setProperty("hadoop.log.dir", new Path(base).toString() + "./logs");
+    }
+
+    if (execType == ExecType.MAPREDUCE) {
+      cluster = MiniCluster.buildCluster();
+      pigServer = new PigServer(ExecType.MAPREDUCE, cluster.getProperties());
+    } else {
+      pigServer = new PigServer(ExecType.LOCAL);
+    }
+
+    Configuration conf = new Configuration();
+    FileSystem fs = cluster.getFileSystem();
+    Path pathWorking = fs.getWorkingDirectory();
+    pathTable = new Path(pathWorking, "TestTableStorer");
+    System.out.println("pathTable =" + pathTable);
+    BasicTable.Writer writer = new BasicTable.Writer(pathTable,
+        "SF_a:int,SF_b:int,SF_c,SF_d,SF_e,SF_f,SF_g",
+        "[SF_a, SF_b, SF_c]; [SF_e, SF_f, SF_g]", conf);
+    Schema schema = writer.getSchema();
+    System.out.println("typeName" + schema.getColumn("SF_a").getType().pigDataType());
+    Tuple tuple = TypesUtils.createTuple(schema);
+
+    final int numsBatch = 10;
+    final int numsInserters = 1;
+    TableInserter[] inserters = new TableInserter[numsInserters];
+    for (int i = 0; i < numsInserters; i++) {
+      inserters[i] = writer.getInserter("ins" + i, false);
+    }
+
+    for (int b = 0; b < numsBatch; b++) {
+      for (int i = 0; i < numsInserters; i++) {
+        TypesUtils.resetTuple(tuple);
+        for (int k = 0; k < tuple.size(); ++k) {
+          try {
+            if (k == 0) {
+              tuple.set(0, k + b);
+            } else if (k == 1) {
+              tuple.set(1, numsBatch - b);
+            } else {
+              tuple.set(k, b + "_" + i + "" + k);
+            }
+          } catch (ExecException e) {
+            e.printStackTrace();
+          }
+        }
+        inserters[i].insert(new BytesWritable(("key" + i).getBytes()), tuple);
+      }
+    }
+    for (int i = 0; i < numsInserters; i++) {
+      inserters[i].close();
+    }
+    writer.close();
+  }
+
+  @AfterClass
+  public static void tearDown() throws Exception {
+    pigServer.shutdown();
+  }
+
+  /**
+   * Return the name of the routine that called getCurrentMethodName
+   * 
+   */
+  public String getCurrentMethodName() {
+    ByteArrayOutputStream baos = new ByteArrayOutputStream();
+    PrintWriter pw = new PrintWriter(baos);
+    (new Throwable()).printStackTrace(pw);
+    pw.flush();
+    String stackTrace = baos.toString();
+    pw.close();
+
+    StringTokenizer tok = new StringTokenizer(stackTrace, "\n");
+    tok.nextToken(); // 'java.lang.Throwable'
+    tok.nextToken(); // 'at ...getCurrentMethodName'
+    String l = tok.nextToken(); // 'at ...<caller to getCurrentRoutine>'
+    // Parse line 3
+    tok = new StringTokenizer(l.trim(), " <(");
+    String t = tok.nextToken(); // 'at'
+    t = tok.nextToken(); // '...<caller to getCurrentRoutine>'
+    return t;
+  }
+
+  @Test
+  public void testStorer() throws ExecException, IOException {
+    /*
+     * Use pig LOAD to load testing data for store
+     */
+    String query = "records = LOAD '" + pathTable.toString()
+        + "' USING org.apache.hadoop.zebra.pig.TableLoader();";
+    pigServer.registerQuery(query);
+
+
+    String orderby = "srecs = ORDER records BY SF_a,SF_b;";
+    pigServer.registerQuery(orderby);
+
+    Path newPath = new Path(getCurrentMethodName());
+
+    /*
+     * Table1 creation
+     */
+
+    
+    pigServer
+        .store(
+            "srecs",
+            newPath.toString()+"1",
+            TableStorer.class.getCanonicalName()
+                + "('[SF_a, SF_b, SF_c]; [SF_e]')");
+
+    String query3 = "records1 = LOAD '"
+        + newPath.toString() + "1"
+        + "' USING org.apache.hadoop.zebra.pig.TableLoader('SF_a, SF_b', 'sorted');";
+    pigServer.registerQuery(query3);
+    /*
+     * Table2 creation
+     */
+
+    pigServer
+        .store(
+            "srecs",
+            newPath.toString()+"2",
+            TableStorer.class.getCanonicalName()
+                + "('[SF_a, SF_b, SF_c]; [SF_e]')");
+
+
+    String query4 = "records2 = LOAD '"
+        + newPath.toString() + "2"
+        + "' USING org.apache.hadoop.zebra.pig.TableLoader();";
+    pigServer.registerQuery(query4);
+
+    String join = "joinRecords = JOIN records1 BY (SF_a, SF_b), records2 BY (SF_a,SF_b) USING \"merge\";";
+    pigServer.registerQuery(join);
+    // check JOIN content
+    Iterator<Tuple> it3 = pigServer.openIterator("joinRecords");
+    int row = 0;
+    Tuple RowValue3 = null;
+    while (it3.hasNext()) {
+      // Last row value
+      RowValue3 = it3.next();
+      Assert.assertEquals(9, RowValue3.size());
+      row++;
+      if (row == 10) {
+        Assert.assertEquals(1, RowValue3.get(1));
+        Assert.assertEquals(9, RowValue3.get(0));
+        Assert.assertEquals("9_06", RowValue3.get(8));
+        Assert.assertEquals("9_05", RowValue3.get(7));
+        Assert.assertEquals("9_04", RowValue3.get(6));
+        Assert.assertEquals("9_03", RowValue3.get(5));
+        Assert.assertEquals("9_02", RowValue3.get(4));
+        Assert.assertEquals(1, RowValue3.get(3));
+        Assert.assertEquals(9, RowValue3.get(2));
+      }
+    }
+    Assert.assertEquals(10, row);
+  }
+}

Added: hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/pig/TestTableSortStorer.java
URL: http://svn.apache.org/viewvc/hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/pig/TestTableSortStorer.java?rev=833166&view=auto
==============================================================================
--- hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/pig/TestTableSortStorer.java (added)
+++ hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/pig/TestTableSortStorer.java Thu Nov  5 21:02:57 2009
@@ -0,0 +1,204 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.zebra.pig;
+
+import java.io.ByteArrayOutputStream;
+import java.io.File;
+import java.io.IOException;
+import java.io.PrintWriter;
+import java.util.Iterator;
+import java.util.StringTokenizer;
+
+import junit.framework.Assert;
+import junit.framework.TestCase;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.io.BytesWritable;
+import org.apache.hadoop.zebra.io.BasicTable;
+import org.apache.hadoop.zebra.io.TableInserter;
+import org.apache.hadoop.zebra.pig.TableStorer;
+import org.apache.hadoop.zebra.schema.Schema;
+import org.apache.hadoop.zebra.types.TypesUtils;
+import org.apache.pig.ExecType;
+import org.apache.pig.PigServer;
+import org.apache.pig.backend.executionengine.ExecException;
+import org.apache.pig.data.Tuple;
+import org.apache.pig.test.MiniCluster;
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+/**
+ * Note:
+ * 
+ * Make sure you add build/pig-0.1.0-dev-core.jar to the classpath of the
+ * app/debug configuration when running this from inside Eclipse.
+ * 
+ */
+public class TestTableSortStorer {
+  protected static ExecType execType = ExecType.MAPREDUCE;
+  private static MiniCluster cluster;
+  protected static PigServer pigServer;
+  private static Path pathTable;
+
+  @BeforeClass
+  public static void setUp() throws Exception {
+    if (System.getProperty("hadoop.log.dir") == null) {
+      String base = new File(".").getPath(); // getAbsolutePath();
+      System
+          .setProperty("hadoop.log.dir", new Path(base).toString() + "./logs");
+    }
+
+    if (execType == ExecType.MAPREDUCE) {
+      cluster = MiniCluster.buildCluster();
+      pigServer = new PigServer(ExecType.MAPREDUCE, cluster.getProperties());
+    } else {
+      pigServer = new PigServer(ExecType.LOCAL);
+    }
+
+    Configuration conf = new Configuration();
+    FileSystem fs = cluster.getFileSystem();
+    Path pathWorking = fs.getWorkingDirectory();
+    pathTable = new Path(pathWorking, "TestTableStorer");
+    System.out.println("pathTable =" + pathTable);
+    BasicTable.Writer writer = new BasicTable.Writer(pathTable,
+        "SF_a:string,SF_b,SF_c,SF_d,SF_e,SF_f,SF_g",
+        "[SF_a, SF_b, SF_c]; [SF_e, SF_f, SF_g]", conf);
+    Schema schema = writer.getSchema();
+    Tuple tuple = TypesUtils.createTuple(schema);
+
+    final int numsBatch = 10;
+    final int numsInserters = 1;
+    TableInserter[] inserters = new TableInserter[numsInserters];
+    for (int i = 0; i < numsInserters; i++) {
+      inserters[i] = writer.getInserter("ins" + i, false);
+    }
+
+    for (int b = 0; b < numsBatch; b++) {
+      for (int i = 0; i < numsInserters; i++) {
+        TypesUtils.resetTuple(tuple);
+        for (int k = 0; k < tuple.size(); ++k) {
+          try {
+            tuple.set(k, (9-b) + "_" + i + "" + k);
+          } catch (ExecException e) {
+            e.printStackTrace();
+          }
+        }
+        inserters[i].insert(new BytesWritable(("key" + i).getBytes()), tuple);
+      }
+    }
+    for (int i = 0; i < numsInserters; i++) {
+      inserters[i].close();
+    }
+    writer.close();
+  }
+
+  @AfterClass
+  public static void tearDown() throws Exception {
+    pigServer.shutdown();
+  }
+
+  /**
+   * Return the name of the routine that called getCurrentMethodName
+   * 
+   */
+  public String getCurrentMethodName() {
+    ByteArrayOutputStream baos = new ByteArrayOutputStream();
+    PrintWriter pw = new PrintWriter(baos);
+    (new Throwable()).printStackTrace(pw);
+    pw.flush();
+    String stackTrace = baos.toString();
+    pw.close();
+
+    StringTokenizer tok = new StringTokenizer(stackTrace, "\n");
+    tok.nextToken(); // 'java.lang.Throwable'
+    tok.nextToken(); // 'at ...getCurrentMethodName'
+    String l = tok.nextToken(); // 'at ...<caller to getCurrentRoutine>'
+    // Parse line 3
+    tok = new StringTokenizer(l.trim(), " <(");
+    String t = tok.nextToken(); // 'at'
+    t = tok.nextToken(); // '...<caller to getCurrentRoutine>'
+    return t;
+  }
+
+  @Test
+  public void testStorer() throws ExecException, IOException {
+    /*
+     * Use pig LOAD to load testing data for store
+     */
+    String query = "records = LOAD '" + pathTable.toString()
+        + "' USING org.apache.hadoop.zebra.pig.TableLoader();";
+    pigServer.registerQuery(query);
+
+    Iterator<Tuple> it2 = pigServer.openIterator("records");
+    int row0 = 0;
+    Tuple RowValue2 = null;
+    while (it2.hasNext()) {
+      // Last row value
+      RowValue2 = it2.next();
+      row0++;
+      if (row0 == 10) {
+        Assert.assertEquals("0_01", RowValue2.get(1));
+        Assert.assertEquals("0_00", RowValue2.get(0).toString());
+      }
+    }
+    Assert.assertEquals(10, row0);
+
+    String orderby = "srecs = ORDER records BY SF_a;";
+    pigServer.registerQuery(orderby);
+
+    /*
+     * Use pig STORE to store testing data BasicTable.Writer writer = new
+     * BasicTable.Writer(pathTable, "SF_a,SF_b,SF_c,SF_d,SF_e,SF_f,SF_g",
+     * "[SF_a, SF_b, SF_c]; [SF_e, SF_f, SF_g]", false, conf);
+     */
+    Path newPath = new Path(getCurrentMethodName());
+
+    pigServer
+        .store(
+            "srecs",
+            newPath.toString(),
+            TableStorer.class.getCanonicalName()
+                + "('[SF_a, SF_b, SF_c]; [SF_e]')");
+
+    // check new table content
+    String query3 = "newRecords = LOAD '"
+        + newPath.toString()
+        + "' USING org.apache.hadoop.zebra.pig.TableLoader('SF_a, SF_b');";
+    pigServer.registerQuery(query3);
+
+    Iterator<Tuple> it3 = pigServer.openIterator("newRecords");
+    int row = 0;
+    Tuple RowValue3 = null;
+    while (it3.hasNext()) {
+      // Last row value
+      RowValue3 = it3.next();
+      row++;
+      if (row == 10) {
+        Assert.assertEquals("9_01", RowValue3.get(1));
+        Assert.assertEquals("9_00", RowValue3.get(0).toString());
+      }
+    }
+    Assert.assertEquals(10, row);
+  }
+}

Modified: hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/pig/TestTableStorer.java
URL: http://svn.apache.org/viewvc/hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/pig/TestTableStorer.java?rev=833166&r1=833165&r2=833166&view=diff
==============================================================================
--- hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/pig/TestTableStorer.java (original)
+++ hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/pig/TestTableStorer.java Thu Nov  5 21:02:57 2009
@@ -22,6 +22,7 @@
 import java.io.IOException;
 import java.util.Iterator;
 
+import junit.framework.Assert;
 import junit.framework.TestCase;
 
 import org.apache.hadoop.conf.Configuration;
@@ -36,6 +37,7 @@
 import org.apache.pig.ExecType;
 import org.apache.pig.PigServer;
 import org.apache.pig.backend.executionengine.ExecException;
+import org.apache.pig.backend.executionengine.ExecJob;
 import org.apache.pig.data.Tuple;
 import org.apache.pig.test.MiniCluster;
 import org.junit.After;
@@ -79,7 +81,7 @@
     System.out.println("pathTable =" + pathTable);
     BasicTable.Writer writer = new BasicTable.Writer(pathTable,
         "SF_a,SF_b,SF_c,SF_d,SF_e,SF_f,SF_g",
-        "[SF_a, SF_b, SF_c]; [SF_e, SF_f, SF_g]", false, conf);
+        "[SF_a, SF_b, SF_c]; [SF_e, SF_f, SF_g]", conf);
     Schema schema = writer.getSchema();
     Tuple tuple = TypesUtils.createTuple(schema);
 
@@ -134,12 +136,13 @@
      * BasicTable.Writer(pathTable, "SF_a,SF_b,SF_c,SF_d,SF_e,SF_f,SF_g",
      * "[SF_a, SF_b, SF_c]; [SF_e, SF_f, SF_g]", false, conf);
      */
-    pigServer
+    ExecJob pigJob = pigServer
         .store(
             "records",
             new Path(pathTable, "store").toString(),
             TableStorer.class.getCanonicalName()
                 + "('[SF_a, SF_b, SF_c]; [SF_e]')");
 
+    Assert.assertNull(pigJob.getException());
   }
 }
\ No newline at end of file
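
A condensed sketch of the pattern this change introduces: PigServer.store() returns an ExecJob, and a null getException() indicates the store job succeeded. Here outputPath and storeFuncSpec stand in for the arguments used in the test.

import junit.framework.Assert;
import org.apache.pig.backend.executionengine.ExecJob;

// Capture the ExecJob instead of discarding store()'s return value;
// getException() is null when the job completed without error.
ExecJob pigJob = pigServer.store("records", outputPath, storeFuncSpec);
Assert.assertNull(pigJob.getException());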

Modified: hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/pig/TestUnionMixedTypes.java
URL: http://svn.apache.org/viewvc/hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/pig/TestUnionMixedTypes.java?rev=833166&r1=833165&r2=833166&view=diff
==============================================================================
--- hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/pig/TestUnionMixedTypes.java (original)
+++ hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/pig/TestUnionMixedTypes.java Thu Nov  5 21:02:57 2009
@@ -96,7 +96,7 @@
     System.out.println("pathTable1 =" + pathTable1);
 
     BasicTable.Writer writer = new BasicTable.Writer(pathTable1, STR_SCHEMA1,
-        STR_STORAGE1, false, conf);
+        STR_STORAGE1, conf);
     Schema schema = writer.getSchema();
     Tuple tuple = TypesUtils.createTuple(schema);
 
@@ -184,7 +184,7 @@
     System.out.println("pathTable2 =" + pathTable2);
 
     BasicTable.Writer writer2 = new BasicTable.Writer(pathTable2, STR_SCHEMA2,
-        STR_STORAGE2, false, conf);
+        STR_STORAGE2, conf);
     Schema schema2 = writer.getSchema();
 
     Tuple tuple2 = TypesUtils.createTuple(schema2);
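
Both hunks above track the same API change: the BasicTable.Writer constructor drops the boolean flag that used to follow the storage string. A sketch of the call before and after, using this test's arguments:

// Old constructor, as removed by this commit:
//   new BasicTable.Writer(pathTable1, STR_SCHEMA1, STR_STORAGE1, false, conf);
// New constructor, as added:
BasicTable.Writer writer = new BasicTable.Writer(pathTable1, STR_SCHEMA1, STR_STORAGE1, conf);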

Modified: hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/types/TestColumnGroupName.java
URL: http://svn.apache.org/viewvc/hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/types/TestColumnGroupName.java?rev=833166&r1=833165&r2=833166&view=diff
==============================================================================
--- hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/types/TestColumnGroupName.java (original)
+++ hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/types/TestColumnGroupName.java Thu Nov  5 21:02:57 2009
@@ -74,7 +74,7 @@
   public void testStorageValid1() {
     try {
      String strStorage = "[f1, f2] as PI; [f3, f4] as General secure by user:joe perm:640 COMPRESS BY gz SERIALIZE BY avro; [f5, f6] as ULT";
-      Partition p = new Partition(schema.toString(), strStorage);
+      Partition p = new Partition(schema.toString(), strStorage, null);
       CGSchema[] cgschemas = p.getCGSchemas();
 
       // 3 column group;
@@ -177,7 +177,7 @@
   public void testStorageValid2() {
     try {
      String strStorage = "[f1, f2] serialize by avro compress by gz; [f3, f4] SERIALIZE BY avro COMPRESS BY gz; [f5, f6]";
-      Partition p = new Partition(schema.toString(), strStorage);
+      Partition p = new Partition(schema.toString(), strStorage, null);
       CGSchema[] cgschemas = p.getCGSchemas();
 
       Assert.assertEquals(cgschemas.length, 3);
@@ -200,7 +200,7 @@
   public void testStorageValid3() {
     try {
      String strStorage = "[f1, f2] as PI serialize by avro compress by gz; [f3, f4] as General SERIALIZE BY avro COMPRESS BY gz; [f5, f6]";
-      Partition p = new Partition(schema.toString(), strStorage);
+      Partition p = new Partition(schema.toString(), strStorage, null);
       CGSchema[] cgschemas = p.getCGSchemas();
 
       Assert.assertEquals(cgschemas.length, 3);
@@ -223,7 +223,7 @@
   public void testStorageValid4() {
     try {
      String strStorage = "[f1, f2] as C1 serialize by avro compress by gz; [f3, f4] as C2 SERIALIZE BY avro COMPRESS BY gz; as C3";
-      Partition p = new Partition(schema.toString(), strStorage);
+      Partition p = new Partition(schema.toString(), strStorage, null);
       CGSchema[] cgschemas = p.getCGSchemas();
 
       Assert.assertEquals(cgschemas.length, 3);
@@ -246,7 +246,7 @@
   public void testStorageValid5() {
     try {
      String strStorage = "[f1, f2] as C1 serialize by avro compress by gz; [f3, f4] as C2 SERIALIZE BY avro COMPRESS BY gz;";
-      Partition p = new Partition(schema.toString(), strStorage);
+      Partition p = new Partition(schema.toString(), strStorage, null);
       CGSchema[] cgschemas = p.getCGSchemas();
 
       Assert.assertEquals(cgschemas.length, 3);
@@ -269,7 +269,7 @@
   public void testStorageValid6() {
     try {
      String strStorage = "[f1, f2] as PI serialize by avro compress by gz; [f3, f4] SERIALIZE BY avro COMPRESS BY gz; [f5, f6] as CG0";
-      Partition p = new Partition(schema.toString(), strStorage);
+      Partition p = new Partition(schema.toString(), strStorage, null);
       CGSchema[] cgschemas = p.getCGSchemas();
 
       Assert.assertEquals(cgschemas.length, 3);
@@ -292,7 +292,7 @@
   public void testStorageValid7() {
     try {
      String strStorage = "[f1, f2] as PI serialize by avro compress by gz; [f3, f4] as Pi SERIALIZE BY avro COMPRESS BY gz; [f5, f6] as CG100";
-      Partition p = new Partition(schema.toString(), strStorage);
+      Partition p = new Partition(schema.toString(), strStorage, null);
       CGSchema[] cgschemas = p.getCGSchemas();
 
       Assert.assertEquals(cgschemas.length, 3);
@@ -318,7 +318,7 @@
   public void testStorageValid8() {
     try {
      String strStorage = "[f1, f2] as C1 serialize by avro compress by gz; [f3, f4, f5, f6] as C2 SERIALIZE BY avro COMPRESS BY gz; as C3 compress by gz";
-      Partition p = new Partition(schema.toString(), strStorage);
+      Partition p = new Partition(schema.toString(), strStorage, null);
       CGSchema[] cgschemas = p.getCGSchemas();
       Assert.assertEquals(cgschemas.length, 2);
       CGSchema cgs1 = cgschemas[0];
@@ -337,7 +337,7 @@
   public void testStorageInvalid1() {
     try {
      String strStorage = "[f1, f2] as C1 serialize by avro compress by gz; [f3, f4] as C1 SERIALIZE BY avro COMPRESS BY gz; [f5, f6] as C3";
-      Partition p = new Partition(schema.toString(), strStorage);
+      Partition p = new Partition(schema.toString(), strStorage, null);
       CGSchema[] cgschemas = p.getCGSchemas();
       CGSchema cgs1 = cgschemas[0];
       System.out.println(cgs1);
@@ -354,7 +354,7 @@
   public void testStorageInvalid2() {
     try {
      String strStorage = "[f1, f2] serialize by avro compress by gz as C1; [f3, f4] as C2 SERIALIZE BY avro COMPRESS BY gz; [f5, f6] as C3";
-      Partition p = new Partition(schema.toString(), strStorage);
+      Partition p = new Partition(schema.toString(), strStorage, null);
       CGSchema[] cgschemas = p.getCGSchemas();
       CGSchema cgs1 = cgschemas[0];
       System.out.println(cgs1);
@@ -366,4 +366,4 @@
       Assert.assertTrue(false);
     }
   }
-}
\ No newline at end of file
+}
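
Every Partition call site in this file (and in the types tests below) gains a third constructor argument, passed as null throughout; the parameter's meaning is not visible in this diff. Sketch of the updated call:

// Partition now takes a third argument (null everywhere in these tests).
Partition p = new Partition(schema.toString(), strStorage, null);
CGSchema[] cgschemas = p.getCGSchemas();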

Modified: hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/types/TestColumnSecurity.java
URL: http://svn.apache.org/viewvc/hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/types/TestColumnSecurity.java?rev=833166&r1=833165&r2=833166&view=diff
==============================================================================
--- hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/types/TestColumnSecurity.java (original)
+++ hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/types/TestColumnSecurity.java Thu Nov  5 21:02:57 2009
@@ -133,8 +133,7 @@
     fs = path1.getFileSystem(conf);
     fs.setPermission(path, new FsPermission((short) 0777));
     login(USER1);
-    BasicTable.Writer writer = new BasicTable.Writer(path1, schema, storage,
-        false, conf);
+    BasicTable.Writer writer = new BasicTable.Writer(path1, schema, storage, conf);
     writer.finish();
     Schema schema1 = writer.getSchema();
     insertData(USER1.getUserName(), schema1, path1);
@@ -183,8 +182,7 @@
     fs = path1.getFileSystem(conf);
     fs.setPermission(path, new FsPermission((short) 0777));
     login(USER1);
-    BasicTable.Writer writer = new BasicTable.Writer(path1, schema, storage,
-        false, conf);
+    BasicTable.Writer writer = new BasicTable.Writer(path1, schema, storage, conf);
     writer.finish();
     Schema schema1 = writer.getSchema();
     insertData(USER1.getUserName(), schema1, path1);
@@ -245,8 +243,7 @@
     fs = path1.getFileSystem(conf);
     fs.setPermission(path, new FsPermission((short) 0777));
     login(USER1);
-    BasicTable.Writer writer = new BasicTable.Writer(path1, schema, storage,
-        false, conf);
+    BasicTable.Writer writer = new BasicTable.Writer(path1, schema, storage, conf);
     writer.finish();
     Schema schema1 = writer.getSchema();
     insertData(USER1.getUserName(), schema1, path1);
@@ -303,8 +300,7 @@
     fs = path1.getFileSystem(conf);
     fs.setPermission(path, new FsPermission((short) 0777));
     login(USER1);
-    BasicTable.Writer writer = new BasicTable.Writer(path1, schema, storage,
-        false, conf);
+    BasicTable.Writer writer = new BasicTable.Writer(path1, schema, storage, conf);
     writer.finish();
     Schema schema1 = writer.getSchema();
     insertData(USER1.getUserName(), schema1, path1);
@@ -362,8 +358,7 @@
     fs = path1.getFileSystem(conf);
     fs.setPermission(path, new FsPermission((short) 0777));
     login(USER1);
-    BasicTable.Writer writer = new BasicTable.Writer(path1, schema, storage,
-        false, conf);
+    BasicTable.Writer writer = new BasicTable.Writer(path1, schema, storage, conf);
     writer.finish();
     Schema schema1 = writer.getSchema();
     insertData(USER1.getUserName(), schema1, path1);
@@ -419,8 +414,7 @@
     fs = path1.getFileSystem(conf);
     fs.setPermission(path, new FsPermission((short) 0777));
     login(USER1);
-    BasicTable.Writer writer = new BasicTable.Writer(path1, schema, storage,
-        false, conf);
+    BasicTable.Writer writer = new BasicTable.Writer(path1, schema, storage, conf);
     writer.finish();
     Schema schema1 = writer.getSchema();
     insertData(USER1.getUserName(), schema1, path1);
@@ -480,7 +474,7 @@
     login(USER1);
 
     try {
-      new BasicTable.Writer(path1, schema, storage, false, conf);
+      new BasicTable.Writer(path1, schema, storage, conf);
       Assert.fail("write should fail");
     } catch (IOException e) {
     }
@@ -500,8 +494,7 @@
     fs = path1.getFileSystem(conf);
     fs.setPermission(path, new FsPermission((short) 0777));
     login(USER1);
-    BasicTable.Writer writer = new BasicTable.Writer(path1, schema, storage,
-        false, conf);
+    BasicTable.Writer writer = new BasicTable.Writer(path1, schema, storage, conf);
     writer.finish();
     Schema schema1 = writer.getSchema();
     insertData(USER1.getUserName(), schema1, path1);
@@ -573,8 +566,7 @@
     fs = path1.getFileSystem(conf);
     fs.setPermission(path, new FsPermission((short) 0777));
     login(USER1);
-    BasicTable.Writer writer = new BasicTable.Writer(path1, schema, storage,
-        false, conf);
+    BasicTable.Writer writer = new BasicTable.Writer(path1, schema, storage, conf);
     writer.finish();
 
     Schema schema1 = writer.getSchema();
@@ -634,7 +626,7 @@
     login(USER1);
 
     try {
-      new BasicTable.Writer(path1, schema, storage, false, conf);
+      new BasicTable.Writer(path1, schema, storage, conf);
       Assert.fail("write should fail");
     } catch (IOException e) {
     }
@@ -654,8 +646,7 @@
     fs = path1.getFileSystem(conf);
     fs.setPermission(path, new FsPermission((short) 0777));
     login(USER1);
-    BasicTable.Writer writer = new BasicTable.Writer(path1, schema, storage,
-        false, conf);
+    BasicTable.Writer writer = new BasicTable.Writer(path1, schema, storage, conf);
     writer.finish();
 
     Schema schema1 = writer.getSchema();
@@ -693,8 +684,7 @@
     fs = path1.getFileSystem(conf);
     fs.setPermission(path, new FsPermission((short) 0777));
     login(USER1);
-    BasicTable.Writer writer = new BasicTable.Writer(path1, schema, storage,
-        false, conf);
+    BasicTable.Writer writer = new BasicTable.Writer(path1, schema, storage, conf);
     writer.finish();
 
     Schema schema1 = writer.getSchema();
@@ -733,8 +723,7 @@
     fs = path1.getFileSystem(conf);
     fs.setPermission(path, new FsPermission((short) 0777));
     login(USER1);
-    BasicTable.Writer writer = new BasicTable.Writer(path1, schema, storage,
-        false, conf);
+    BasicTable.Writer writer = new BasicTable.Writer(path1, schema, storage, conf);
     writer.finish();
 
     Schema schema1 = writer.getSchema();
@@ -772,8 +761,7 @@
     fs = path1.getFileSystem(conf);
     fs.setPermission(path, new FsPermission((short) 0777));
     login(USER1);
-    BasicTable.Writer writer = new BasicTable.Writer(path1, schema, storage,
-        false, conf);
+    BasicTable.Writer writer = new BasicTable.Writer(path1, schema, storage, conf);
     writer.finish();
 
     Schema schema1 = writer.getSchema();
@@ -811,8 +799,7 @@
     fs = path1.getFileSystem(conf);
     fs.setPermission(path, new FsPermission((short) 0777));
     login(USER1);
-    BasicTable.Writer writer = new BasicTable.Writer(path1, schema, storage,
-        false, conf);
+    BasicTable.Writer writer = new BasicTable.Writer(path1, schema, storage, conf);
     writer.finish();
 
     Schema schema1 = writer.getSchema();
@@ -859,8 +846,7 @@
     fs = path1.getFileSystem(conf);
     fs.setPermission(path, new FsPermission((short) 0777));
     login(USER1);
-    BasicTable.Writer writer = new BasicTable.Writer(path1, schema, storage,
-        false, conf);
+    BasicTable.Writer writer = new BasicTable.Writer(path1, schema, storage, conf);
     writer.finish();
 
     Schema schema1 = writer.getSchema();
@@ -927,7 +913,7 @@
     login(USER1);
 
     try {
-      new BasicTable.Writer(path1, schema, storage, false, conf);
+      new BasicTable.Writer(path1, schema, storage, conf);
       Assert.fail("write should fail");
     } catch (IOException e) {
     }
@@ -949,7 +935,7 @@
     login(USER1);
 
     try {
-      new BasicTable.Writer(path1, schema, storage, false, conf);
+      new BasicTable.Writer(path1, schema, storage, conf);
       Assert.fail("write should fail");
     } catch (IOException e) {
     }
@@ -970,7 +956,7 @@
     fs.setPermission(path, new FsPermission((short) 0777));
     login(USER1);
     try {
-      new BasicTable.Writer(path1, schema, storage, false, conf);
+      new BasicTable.Writer(path1, schema, storage, conf);
       Assert.fail("write should fail");
     } catch (IOException e) {
     }
@@ -992,7 +978,7 @@
     login(USER1);
 
     try {
-      new BasicTable.Writer(path1, schema, storage, false, conf);
+      new BasicTable.Writer(path1, schema, storage, conf);
       Assert.fail("write should fail");
     } catch (IOException e) {
     }
@@ -1015,8 +1001,7 @@
     fs.setPermission(path, new FsPermission((short) 0777));
 
     login(SUPERUSER);
-    BasicTable.Writer writer = new BasicTable.Writer(path1, schema, storage,
-        false, conf);
+    BasicTable.Writer writer = new BasicTable.Writer(path1, schema, storage, conf);
     writer.finish();
 
     Schema schema1 = writer.getSchema();
@@ -1088,8 +1073,7 @@
     fs = path1.getFileSystem(conf);
     fs.setPermission(path, new FsPermission((short) 0777));
 
-    BasicTable.Writer writer = new BasicTable.Writer(path1, schema, storage,
-        false, conf);
+    BasicTable.Writer writer = new BasicTable.Writer(path1, schema, storage, conf);
     writer.finish();
 
     Schema schema1 = writer.getSchema();

Modified: hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/types/TestStorageCollection.java
URL: http://svn.apache.org/viewvc/hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/types/TestStorageCollection.java?rev=833166&r1=833165&r2=833166&view=diff
==============================================================================
--- hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/types/TestStorageCollection.java (original)
+++ hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/types/TestStorageCollection.java Thu Nov  5 21:02:57 2009
@@ -49,7 +49,7 @@
   public void testStorageValid1() {
     try {
       String strStorage = "[c1]; [c2]";
-      Partition p = new Partition(schema.toString(), strStorage);
+      Partition p = new Partition(schema.toString(), strStorage, null);
       CGSchema[] cgschemas = p.getCGSchemas();
 
       // 2 column group;
@@ -109,7 +109,7 @@
   public void testStorageValid2() {
     try {
       String strStorage = "[c1.f1]";
-      Partition p = new Partition(schema.toString(), strStorage);
+      Partition p = new Partition(schema.toString(), strStorage, null);
       Assert.assertTrue(false);
       CGSchema[] cgschemas = p.getCGSchemas();
 

Modified: hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/types/TestStorageMap.java
URL: http://svn.apache.org/viewvc/hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/types/TestStorageMap.java?rev=833166&r1=833165&r2=833166&view=diff
==============================================================================
--- hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/types/TestStorageMap.java (original)
+++ hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/types/TestStorageMap.java Thu Nov  5 21:02:57 2009
@@ -51,7 +51,7 @@
   public void testStorageValid1() {
     try {
       String strStorage = "[m1#{k1}]; [m2#{k1}, f3]";
-      Partition p = new Partition(schema.toString(), strStorage);
+      Partition p = new Partition(schema.toString(), strStorage, null);
       CGSchema[] cgschemas = p.getCGSchemas();
 
       // 3 column group;
@@ -138,7 +138,7 @@
   public void testStorageValid2() {
     try {
       String strStorage = "[m1#{k1}]; [m1#{k2}, f3]";
-      Partition p = new Partition(schema.toString(), strStorage);
+      Partition p = new Partition(schema.toString(), strStorage, null);
       CGSchema[] cgschemas = p.getCGSchemas();
 
       // 3 column group;
@@ -222,7 +222,7 @@
   public void testStorageInvalid1() {
     try {
       String strStorage = "m1#{k1}";
-      Partition p = new Partition(schema.toString(), strStorage);
+      Partition p = new Partition(schema.toString(), strStorage, null);
       CGSchema[] cgschemas = p.getCGSchemas();
       CGSchema cgs1 = cgschemas[0];
       System.out.println(cgs1);
@@ -239,7 +239,7 @@
   public void testStorageInvalid2() {
     try {
       String strStorage = "[m1#{k1}] abc; [m1#{k2}, f3] xyz";
-      Partition p = new Partition(schema.toString(), strStorage);
+      Partition p = new Partition(schema.toString(), strStorage, null);
       CGSchema[] cgschemas = p.getCGSchemas();
       CGSchema cgs1 = cgschemas[0];
       System.out.println(cgs1);
@@ -256,7 +256,7 @@
   public void testStorageInvalid3() {
     try {
       String strStorage = "[m1{#k1}{#k2}]";
-      Partition p = new Partition(schema.toString(), strStorage);
+      Partition p = new Partition(schema.toString(), strStorage, null);
       CGSchema[] cgschemas = p.getCGSchemas();
       CGSchema cgs1 = cgschemas[0];
       System.out.println(cgs1);

Modified: hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/types/TestStorageMisc1.java
URL: http://svn.apache.org/viewvc/hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/types/TestStorageMisc1.java?rev=833166&r1=833165&r2=833166&view=diff
==============================================================================
--- hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/types/TestStorageMisc1.java (original)
+++ hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/types/TestStorageMisc1.java Thu Nov  5 21:02:57 2009
@@ -89,7 +89,7 @@
//      String strStorage = "[r.r.f1,r.f2#{k1}] COMPRESS BY gzip SECURE BY user:ggg SECURE BY group:fff; [r.r.f2, r.f2#{k2}] COMPRESS BY lzo SERIALIZE BY avro";
//      String strStorage = "[r.r.f1,r.f2#{k1}] COMPRESS BY gzip SECURE BY user:root user:root; [r.r.f2, r.f2#{k2}] COMPRESS BY lzo SERIALIZE BY avro";
 
-       Partition p = new Partition(schema.toString(), strStorage);
+       Partition p = new Partition(schema.toString(), strStorage, null);
       CGSchema[] cgschemas = p.getCGSchemas();
 
       // 3 column group;

Modified: hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/types/TestStorageMisc2.java
URL: http://svn.apache.org/viewvc/hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/types/TestStorageMisc2.java?rev=833166&r1=833165&r2=833166&view=diff
==============================================================================
--- hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/types/TestStorageMisc2.java (original)
+++ hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/types/TestStorageMisc2.java Thu Nov  5 21:02:57 2009
@@ -96,7 +96,7 @@
   public void testStorageValid1() {
     try {
       String strStorage = "[c] compress by gz; [m1] serialize by avro";
-      Partition p = new Partition(schema.toString(), strStorage);
+      Partition p = new Partition(schema.toString(), strStorage, null);
       CGSchema[] cgschemas = p.getCGSchemas();
 
       // 2 column group;
@@ -160,7 +160,7 @@
   public void testStorageValid2() {
     try {
       String strStorage = "[c] compress by gz; [m1#{k1}] serialize by avro";
-      Partition p = new Partition(schema.toString(), strStorage);
+      Partition p = new Partition(schema.toString(), strStorage, null);
       CGSchema[] cgschemas = p.getCGSchemas();
 
       // 3 column group;

Modified: hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/types/TestStorageMisc3.java
URL: http://svn.apache.org/viewvc/hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/types/TestStorageMisc3.java?rev=833166&r1=833165&r2=833166&view=diff
==============================================================================
--- hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/types/TestStorageMisc3.java (original)
+++ hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/types/TestStorageMisc3.java Thu Nov  5 21:02:57 2009
@@ -49,7 +49,7 @@
   public void testStorageValid1() {
     try {
       String strStorage = "[c] compress by gz; [m1] serialize by avro";
-      Partition p = new Partition(schema.toString(), strStorage);
+      Partition p = new Partition(schema.toString(), strStorage, null);
       CGSchema[] cgschemas = p.getCGSchemas();
 
       // 2 column group;
@@ -113,7 +113,7 @@
   public void testStorageValid2() {
     try {
       String strStorage = "[c.r] compress by gz; [m1] serialize by avro";
-      Partition p = new Partition(schema.toString(), strStorage);
+      Partition p = new Partition(schema.toString(), strStorage, null);
       Assert.assertTrue(false);
       CGSchema[] cgschemas = p.getCGSchemas();
 
@@ -177,7 +177,7 @@
   public void testStorageValid3() {
     try {
       String strStorage = "[c.r.f1] compress by gz; [m1] serialize by avro";
-      Partition p = new Partition(schema.toString(), strStorage);
+      Partition p = new Partition(schema.toString(), strStorage, null);
       Assert.assertTrue(false);
       CGSchema[] cgschemas = p.getCGSchemas();
 

Modified: hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/types/TestStorageRecord.java
URL: http://svn.apache.org/viewvc/hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/types/TestStorageRecord.java?rev=833166&r1=833165&r2=833166&view=diff
==============================================================================
--- hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/types/TestStorageRecord.java (original)
+++ hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/types/TestStorageRecord.java Thu Nov  5 21:02:57 2009
@@ -49,7 +49,7 @@
   public void testStorageValid1() {
     try {
       String strStorage = "[r1.f1, r2.r3.f3]; [r1.f2, r2.r3.f4]";
-      Partition p = new Partition(schema.toString(), strStorage);
+      Partition p = new Partition(schema.toString(), strStorage, null);
       CGSchema[] cgschemas = p.getCGSchemas();
 
       // 2 column group;

Modified: hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/types/TestStorePrimitive.java
URL: http://svn.apache.org/viewvc/hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/types/TestStorePrimitive.java?rev=833166&r1=833165&r2=833166&view=diff
==============================================================================
--- hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/types/TestStorePrimitive.java (original)
+++ hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/types/TestStorePrimitive.java Thu Nov  5 21:02:57 2009
@@ -73,7 +73,7 @@
   public void testStorageValid1() {
     try {
      String strStorage = "[f1, f2]; [f3, f4] COMPRESS BY gz SERIALIZE BY avro";
-      Partition p = new Partition(schema.toString(), strStorage);
+      Partition p = new Partition(schema.toString(), strStorage, null);
       CGSchema[] cgschemas = p.getCGSchemas();
 
       // 3 column group;
@@ -169,7 +169,7 @@
   public void testStorageValid2() {
     try {
      String strStorage = "[f1, f2] serialize by avro compress by gz; [f3, f4] SERIALIZE BY avro COMPRESS BY gz";
-      Partition p = new Partition(schema.toString(), strStorage);
+      Partition p = new Partition(schema.toString(), strStorage, null);
       CGSchema[] cgschemas = p.getCGSchemas();
 
       Assert.assertEquals(cgschemas.length, 3);
@@ -184,7 +184,7 @@
   public void testStorageValid3() {
     try {
       String strStorage = "";
-      Partition p = new Partition(schema.toString(), strStorage);
+      Partition p = new Partition(schema.toString(), strStorage, null);
       CGSchema[] cgschemas = p.getCGSchemas();
       Assert.assertEquals(cgschemas.length, 1);
       CGSchema cgs1 = cgschemas[0];
@@ -198,7 +198,7 @@
   public void testStorageInvalid1() {
     try {
       String strStorage = "f1";
-      Partition p = new Partition(schema.toString(), strStorage);
+      Partition p = new Partition(schema.toString(), strStorage, null);
       CGSchema[] cgschemas = p.getCGSchemas();
       CGSchema cgs1 = cgschemas[0];
       System.out.println(cgs1);
@@ -215,7 +215,7 @@
   public void testStorageInvalid2() {
     try {
       String strStorage = "[f100]";
-      Partition p = new Partition(schema.toString(), strStorage);
+      Partition p = new Partition(schema.toString(), strStorage, null);
       CGSchema[] cgschemas = p.getCGSchemas();
       CGSchema cgs1 = cgschemas[0];
       System.out.println(cgs1);
@@ -232,7 +232,7 @@
   public void testStorageInvalid3() {
     try {
       String strStorage = "f1:long";
-      Partition p = new Partition(schema.toString(), strStorage);
+      Partition p = new Partition(schema.toString(), strStorage, null);
       CGSchema[] cgschemas = p.getCGSchemas();
       CGSchema cgs1 = cgschemas[0];
       System.out.println(cgs1);
@@ -249,7 +249,7 @@
   public void testStorageInvalid4() {
     try {
       String strStorage = "[";
-      Partition p = new Partition(schema.toString(), strStorage);
+      Partition p = new Partition(schema.toString(), strStorage, null);
       CGSchema[] cgschemas = p.getCGSchemas();
       CGSchema cgs1 = cgschemas[0];
       System.out.println(cgs1);
@@ -266,7 +266,7 @@
   public void testStorageInvalid5() {
     try {
       String strStorage = "[f1, f2]; [f1, f4]";
-      Partition p = new Partition(schema.toString(), strStorage);
+      Partition p = new Partition(schema.toString(), strStorage, null);
       CGSchema[] cgschemas = p.getCGSchemas();
       CGSchema cgs1 = cgschemas[0];
       System.out.println(cgs1);
@@ -283,7 +283,7 @@
   public void testStorageInvalid6() {
     try {
       String strStorage = ":";
-      Partition p = new Partition(schema.toString(), strStorage);
+      Partition p = new Partition(schema.toString(), strStorage, null);
       CGSchema[] cgschemas = p.getCGSchemas();
       CGSchema cgs1 = cgschemas[0];
       System.out.println(cgs1);
@@ -300,7 +300,7 @@
   public void testStorageInvalid7() {
     try {
      String strStorage = "[f1, f2] serialize by xyz compress by gz; [f3, f4] SERIALIZE BY avro COMPRESS BY lzo";
-      Partition p = new Partition(schema.toString(), strStorage);
+      Partition p = new Partition(schema.toString(), strStorage, null);
       CGSchema[] cgschemas = p.getCGSchemas();
       CGSchema cgs1 = cgschemas[0];
       System.out.println(cgs1);
@@ -317,7 +317,7 @@
   public void testStorageInvalid8() {
     try {
      String strStorage = "[f1, f2] serialize by avro compress by xyz; [f3, f4] SERIALIZE BY avro COMPRESS BY lzo";
-      Partition p = new Partition(schema.toString(), strStorage);
+      Partition p = new Partition(schema.toString(), strStorage, null);
       CGSchema[] cgschemas = p.getCGSchemas();
       CGSchema cgs1 = cgschemas[0];
       System.out.println(cgs1);

