Posted to commits@pig.apache.org by ga...@apache.org on 2009/08/12 00:27:47 UTC

svn commit: r803312 [15/16] - in /hadoop/pig/trunk: ./ contrib/zebra/ contrib/zebra/docs/ contrib/zebra/src/ contrib/zebra/src/java/ contrib/zebra/src/java/org/ contrib/zebra/src/java/org/apache/ contrib/zebra/src/java/org/apache/hadoop/ contrib/zebra/...

Added: hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/pig/TestTableLoader.java
URL: http://svn.apache.org/viewvc/hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/pig/TestTableLoader.java?rev=803312&view=auto
==============================================================================
--- hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/pig/TestTableLoader.java (added)
+++ hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/pig/TestTableLoader.java Tue Aug 11 22:27:44 2009
@@ -0,0 +1,147 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.zebra.pig;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.Iterator;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.io.BytesWritable;
+import org.apache.hadoop.zebra.io.BasicTable;
+import org.apache.hadoop.zebra.io.TableInserter;
+import org.apache.hadoop.zebra.types.Schema;
+import org.apache.hadoop.zebra.types.TypesUtils;
+import org.apache.pig.ExecType;
+import org.apache.pig.PigServer;
+import org.apache.pig.backend.executionengine.ExecException;
+import org.apache.pig.data.Tuple;
+import org.apache.pig.test.MiniCluster;
+import org.junit.Test;
+import org.junit.BeforeClass;
+import org.junit.AfterClass;
+import junit.framework.Assert;
+
+/**
+ * Note:
+ * 
+ * Make sure you add build/pig-0.1.0-dev-core.jar to the classpath of the
+ * app/debug configuration when running this from inside Eclipse.
+ * 
+ */
+public class TestTableLoader {
+  static protected ExecType execType = ExecType.MAPREDUCE;
+  static private MiniCluster cluster;
+  static protected PigServer pigServer;
+  static private Path pathTable;
+
+  @BeforeClass
+  public static void setUp() throws Exception {
+    if (System.getProperty("hadoop.log.dir") == null) {
+      String base = new File(".").getPath(); // getAbsolutePath();
+      System
+          .setProperty("hadoop.log.dir", new Path(base).toString() + "./logs");
+    }
+
+    if (execType == ExecType.MAPREDUCE) {
+      cluster = MiniCluster.buildCluster();
+      pigServer = new PigServer(ExecType.MAPREDUCE, cluster.getProperties());
+    } else {
+      pigServer = new PigServer(ExecType.LOCAL);
+    }
+
+    Configuration conf = new Configuration();
+    FileSystem fs = cluster.getFileSystem();
+    Path pathWorking = fs.getWorkingDirectory();
+    // pathTable = new Path(pathWorking, this.getClass().getSimpleName());
+    pathTable = new Path(pathWorking, "TestTableLoader");
+    System.out.println("pathTable =" + pathTable);
+
+    BasicTable.Writer writer = new BasicTable.Writer(pathTable,
+        "a:string,b,c:string,d,e,f,g", "[a,b,c];[d,e,f,g]", false, conf);
+    Schema schema = writer.getSchema();
+    Tuple tuple = TypesUtils.createTuple(schema);
+
+    final int numsBatch = 10;
+    final int numsInserters = 2;
+    TableInserter[] inserters = new TableInserter[numsInserters];
+    for (int i = 0; i < numsInserters; i++) {
+      inserters[i] = writer.getInserter("ins" + i, false);
+    }
+
+    for (int b = 0; b < numsBatch; b++) {
+      for (int i = 0; i < numsInserters; i++) {
+        TypesUtils.resetTuple(tuple);
+        for (int k = 0; k < tuple.size(); ++k) {
+          try {
+            tuple.set(k, b + "_" + i + "" + k);
+          } catch (ExecException e) {
+            e.printStackTrace();
+          }
+        }
+        inserters[i].insert(new BytesWritable(("key" + i).getBytes()), tuple);
+      }
+    }
+    for (int i = 0; i < numsInserters; i++) {
+      inserters[i].close();
+    }
+  }
+
+  @AfterClass
+  static public void tearDown() throws Exception {
+    pigServer.shutdown();
+  }
+
+  @Test
+  public void testReader1() throws ExecException, IOException {
+    String query = "records = LOAD '" + pathTable.toString()
+        + "' USING org.apache.hadoop.zebra.pig.TableLoader('c, a');";
+    System.out.println(query);
+    pigServer.registerQuery(query);
+    Iterator<Tuple> it = pigServer.openIterator("records");
+    while (it.hasNext()) {
+      Tuple cur = it.next();
+      System.out.println(cur);
+    }
+  }
+
+  @Test
+  public void testReader2() throws ExecException, IOException {
+    String query = "records = LOAD '" + pathTable.toString()
+        + "' USING org.apache.hadoop.zebra.pig.TableLoader('a, aa, b, c');";
+    System.out.println(query);
+    pigServer.registerQuery(query);
+    Iterator<Tuple> it = pigServer.openIterator("records");
+    int cnt = 0;
+    while (it.hasNext()) {
+      Tuple cur = it.next();
+      cnt++;
+      if (cnt == 1) {
+        Assert.assertEquals(4, cur.size());
+        Assert.assertEquals("0_00", cur.get(0));
+        Assert.assertEquals(null, cur.get(1));
+        Assert.assertEquals("0_01", cur.get(2));
+        Assert.assertEquals("0_02", cur.get(3));
+      }
+      System.out.println(cur);
+    }
+  }
+}
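
A note on the two pieces of syntax this test exercises. In the schema string
handed to BasicTable.Writer ("a:string,b,c:string,d,e,f,g"), columns declared
without a type (b, d, e, f, g) default to the bytes type, and the second
argument ("[a,b,c];[d,e,f,g]") is the storage hint that splits the columns
into two column groups. The TableLoader projection strings ('c, a' and
'a, aa, b, c') select columns by name, and a projected column that does not
exist in the table (aa) reads back as null, which is what testReader2
asserts. A minimal sketch of the type-defaulting rule, using the parser API
that appears later in this commit (TestSchemaPrimitive pins down the same
behavior):

    import java.io.StringReader;
    import org.apache.hadoop.zebra.types.ColumnType;
    import org.apache.hadoop.zebra.types.Schema;
    import org.apache.hadoop.zebra.types.TableSchemaParser;

    // Assumes a surrounding method declared "throws ParseException".
    TableSchemaParser parser =
        new TableSchemaParser(new StringReader("a:string,b,c:string,d"));
    Schema schema = parser.RecordSchema(null);
    // "b" (column index 1) carries no declared type, so it defaults to bytes.
    assert schema.getColumn(1).type == ColumnType.BYTES;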

Added: hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/pig/TestTableStorer.java
URL: http://svn.apache.org/viewvc/hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/pig/TestTableStorer.java?rev=803312&view=auto
==============================================================================
--- hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/pig/TestTableStorer.java (added)
+++ hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/pig/TestTableStorer.java Tue Aug 11 22:27:44 2009
@@ -0,0 +1,145 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.zebra.pig;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.Iterator;
+
+import junit.framework.TestCase;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.io.BytesWritable;
+import org.apache.hadoop.zebra.io.BasicTable;
+import org.apache.hadoop.zebra.io.TableInserter;
+import org.apache.hadoop.zebra.pig.TableStorer;
+import org.apache.hadoop.zebra.types.Schema;
+import org.apache.hadoop.zebra.types.TypesUtils;
+import org.apache.pig.ExecType;
+import org.apache.pig.PigServer;
+import org.apache.pig.backend.executionengine.ExecException;
+import org.apache.pig.data.Tuple;
+import org.apache.pig.test.MiniCluster;
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+/**
+ * Note:
+ * 
+ * Make sure you add build/pig-0.1.0-dev-core.jar to the classpath of the
+ * app/debug configuration when running this from inside Eclipse.
+ * 
+ */
+public class TestTableStorer {
+  protected static ExecType execType = ExecType.MAPREDUCE;
+  private static MiniCluster cluster;
+  protected static PigServer pigServer;
+  private static Path pathTable;
+
+  @BeforeClass
+  public static void setUp() throws Exception {
+    if (System.getProperty("hadoop.log.dir") == null) {
+      String base = new File(".").getPath(); // getAbsolutePath();
+      System
+          .setProperty("hadoop.log.dir", new Path(base).toString() + "./logs");
+    }
+
+    if (execType == ExecType.MAPREDUCE) {
+      cluster = MiniCluster.buildCluster();
+      pigServer = new PigServer(ExecType.MAPREDUCE, cluster.getProperties());
+    } else {
+      pigServer = new PigServer(ExecType.LOCAL);
+    }
+
+    Configuration conf = new Configuration();
+    FileSystem fs = cluster.getFileSystem();
+    Path pathWorking = fs.getWorkingDirectory();
+    pathTable = new Path(pathWorking, "TestTableStorer");
+    System.out.println("pathTable =" + pathTable);
+    BasicTable.Writer writer = new BasicTable.Writer(pathTable,
+        "SF_a,SF_b,SF_c,SF_d,SF_e,SF_f,SF_g",
+        "[SF_a, SF_b, SF_c]; [SF_e, SF_f, SF_g]", false, conf);
+    Schema schema = writer.getSchema();
+    Tuple tuple = TypesUtils.createTuple(schema);
+
+    final int numsBatch = 10;
+    final int numsInserters = 2;
+    TableInserter[] inserters = new TableInserter[numsInserters];
+    for (int i = 0; i < numsInserters; i++) {
+      inserters[i] = writer.getInserter("ins" + i, false);
+    }
+
+    for (int b = 0; b < numsBatch; b++) {
+      for (int i = 0; i < numsInserters; i++) {
+        TypesUtils.resetTuple(tuple);
+        for (int k = 0; k < tuple.size(); ++k) {
+          try {
+            tuple.set(k, b + "_" + i + "" + k);
+          } catch (ExecException e) {
+            e.printStackTrace();
+          }
+        }
+        inserters[i].insert(new BytesWritable(("key" + i).getBytes()), tuple);
+      }
+    }
+    for (int i = 0; i < numsInserters; i++) {
+      inserters[i].close();
+    }
+    writer.close();
+  }
+
+  @AfterClass
+  public static void tearDown() throws Exception {
+    pigServer.shutdown();
+  }
+
+  @Test
+  public void testStorer() throws ExecException, IOException {
+    /*
+     * Use pig LOAD to load testing data for store
+     */
+    String query = "records = LOAD '" + pathTable.toString()
+        + "' USING org.apache.hadoop.zebra.pig.TableLoader();";
+    pigServer.registerQuery(query);
+
+    Iterator<Tuple> it = pigServer.openIterator("records");
+    while (it.hasNext()) {
+      Tuple cur = it.next();
+      System.out.println(cur);
+    }
+
+    /*
+     * Use pig STORE to store testing data BasicTable.Writer writer = new
+     * BasicTable.Writer(pathTable, "SF_a,SF_b,SF_c,SF_d,SF_e,SF_f,SF_g",
+     * "[SF_a, SF_b, SF_c]; [SF_e, SF_f, SF_g]", false, conf);
+     */
+    pigServer
+        .store(
+            "records",
+            new Path(pathTable, "store").toString(),
+            TableStorer.class.getCanonicalName()
+                + "('SF_a,SF_b,SF_c,SF_d,SF_e,SF_f,SF_g', '[SF_a, SF_b, SF_c]; [SF_e]')");
+
+  }
+}
\ No newline at end of file
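
The pigServer.store(...) call above is the programmatic form of a Pig Latin
STORE statement; a sketch of the equivalent statement issued through the same
PigServer (path and storage hint copied from the test; whether the STORE runs
eagerly depends on the PigServer's batch mode):

    String store = "STORE records INTO '"
        + new Path(pathTable, "store").toString() + "' USING "
        + TableStorer.class.getCanonicalName()
        + "('SF_a,SF_b,SF_c,SF_d,SF_e,SF_f,SF_g', '[SF_a, SF_b, SF_c]; [SF_e]');";
    pigServer.registerQuery(store);

Note that the storage hint handed to TableStorer names only four of the seven
columns; per the partition rules pinned down by TestStorageCollection below,
the unnamed columns fall into a trailing column group.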

Added: hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/pig/TestUnionMixedTypes.java
URL: http://svn.apache.org/viewvc/hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/pig/TestUnionMixedTypes.java?rev=803312&view=auto
==============================================================================
--- hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/pig/TestUnionMixedTypes.java (added)
+++ hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/pig/TestUnionMixedTypes.java Tue Aug 11 22:27:44 2009
@@ -0,0 +1,506 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.zebra.pig;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+
+import junit.framework.Assert;
+import junit.framework.TestCase;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.io.BytesWritable;
+import org.apache.hadoop.zebra.io.BasicTable;
+import org.apache.hadoop.zebra.io.TableInserter;
+import org.apache.hadoop.zebra.io.TableScanner;
+import org.apache.hadoop.zebra.io.BasicTable.Reader.RangeSplit;
+import org.apache.hadoop.zebra.types.ParseException;
+import org.apache.hadoop.zebra.types.Schema;
+import org.apache.hadoop.zebra.types.TypesUtils;
+import org.apache.pig.ExecType;
+import org.apache.pig.PigServer;
+import org.apache.pig.backend.executionengine.ExecException;
+import org.apache.pig.data.DataBag;
+import org.apache.pig.data.DataByteArray;
+import org.apache.pig.data.Tuple;
+import org.apache.pig.test.MiniCluster;
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+/**
+ * Note:
+ * 
+ * Make sure you add build/pig-0.1.0-dev-core.jar to the classpath of the
+ * app/debug configuration when running this from inside Eclipse.
+ * 
+ */
+public class TestUnionMixedTypes {
+  protected static ExecType execType = ExecType.MAPREDUCE;
+  private static MiniCluster cluster;
+  protected static PigServer pigServer;
+  private static Path pathWorking, pathTable1, pathTable2;
+  private static Configuration conf;
+  final static String STR_SCHEMA1 = "a:collection(a:string, b:string),b:map(string),c:record(f1:string, f2:string),d";
+  final static String STR_STORAGE1 = "[a,d];[b#{k1|k2}];[c]";
+  final static String STR_SCHEMA2 = "a:collection(a:string, b:string),b:map(string),c:record(f1:string, f2:string),e";
+  final static String STR_STORAGE2 = "[a,e];[b#{k1}];[c.f1]";
+
+  @BeforeClass
+  public static void setUpOnce() throws Exception {
+    if (System.getProperty("hadoop.log.dir") == null) {
+      String base = new File(".").getPath(); // getAbsolutePath();
+      System
+          .setProperty("hadoop.log.dir", new Path(base).toString() + "./logs");
+    }
+
+    if (execType == ExecType.MAPREDUCE) {
+      cluster = MiniCluster.buildCluster();
+      pigServer = new PigServer(ExecType.MAPREDUCE, cluster.getProperties());
+    } else {
+      pigServer = new PigServer(ExecType.LOCAL);
+    }
+
+    conf = new Configuration();
+    FileSystem fs = cluster.getFileSystem();
+    pathWorking = fs.getWorkingDirectory();
+
+    /*
+     * create 1st basic table;
+     */
+    pathTable1 = new Path(pathWorking, "1");
+    System.out.println("pathTable1 =" + pathTable1);
+
+    BasicTable.Writer writer = new BasicTable.Writer(pathTable1, STR_SCHEMA1,
+        STR_STORAGE1, false, conf);
+    Schema schema = writer.getSchema();
+    Tuple tuple = TypesUtils.createTuple(schema);
+
+    BasicTable.Writer writer1 = new BasicTable.Writer(pathTable1, conf);
+    int part = 0;
+    TableInserter inserter = writer1.getInserter("part" + part, true);
+
+    TypesUtils.resetTuple(tuple);
+    DataBag bag1 = TypesUtils.createBag();
+    Schema schColl = schema.getColumn(0).getSchema();
+    Tuple tupColl1 = TypesUtils.createTuple(schColl);
+    Tuple tupColl2 = TypesUtils.createTuple(schColl);
+
+    int row = 0;
+    tupColl1.set(0, "1.1");
+    tupColl1.set(1, "1.11");
+    bag1.add(tupColl1);
+    tupColl2.set(0, "1.111");
+    tupColl2.set(1, "1.1111");
+    bag1.add(tupColl2);
+    tuple.set(0, bag1);
+
+    Map<String, String> m1 = new HashMap<String, String>();
+    m1.put("k1", "k11");
+    m1.put("b", "b1");
+    m1.put("c", "c1");
+    tuple.set(1, m1);
+
+    Tuple tupRecord1;
+    try {
+      tupRecord1 = TypesUtils.createTuple(schema.getColumnSchema("c")
+          .getSchema());
+    } catch (ParseException e) {
+      e.printStackTrace();
+      throw new IOException(e);
+    }
+
+    tupRecord1.set(0, "1");
+    tupRecord1.set(1, "hello1");
+    tuple.set(2, tupRecord1);
+    tuple.set(3, "world1");
+
+    inserter.insert(new BytesWritable(String.format("k%d%d", part + 1, row + 1)
+        .getBytes()), tuple);
+
+    // second row
+    row++;
+    TypesUtils.resetTuple(tuple);
+    TypesUtils.resetTuple(tupRecord1);
+    TypesUtils.resetTuple(tupColl1);
+    TypesUtils.resetTuple(tupColl2);
+    m1.clear();
+    bag1.clear();
+
+    tupColl1.set(0, "2.2");
+    tupColl1.set(1, "2.22");
+    bag1.add(tupColl1);
+    tupColl2.set(0, "2.222");
+    tupColl2.set(1, "2.2222");
+    bag1.add(tupColl2);
+    tuple.set(0, bag1);
+
+    m1.put("k2", "k22");
+    m1.put("k3", "k32");
+    m1.put("k1", "k12");
+    m1.put("k4", "k42");
+    tuple.set(1, m1);
+
+    tupRecord1.set(0, "2");
+    tupRecord1.set(1, "hello2");
+    tuple.set(2, tupRecord1);
+    tuple.set(3, "world2");
+
+    inserter.insert(new BytesWritable(String.format("k%d%d", part + 1, row + 1)
+        .getBytes()), tuple);
+    inserter.close();
+    writer1.finish();
+    writer.close();
+
+    /*
+     * create 2nd basic table;
+     */
+    pathTable2 = new Path(pathWorking, "2");
+    System.out.println("pathTable2 =" + pathTable2);
+
+    BasicTable.Writer writer2 = new BasicTable.Writer(pathTable2, STR_SCHEMA2,
+        STR_STORAGE2, false, conf);
+    Schema schema2 = writer2.getSchema();
+
+    Tuple tuple2 = TypesUtils.createTuple(schema2);
+
+    BasicTable.Writer writer22 = new BasicTable.Writer(pathTable2, conf);
+    part = 0;
+    TableInserter inserter2 = writer22.getInserter("part" + part, true);
+
+    TypesUtils.resetTuple(tuple2);
+    TypesUtils.resetTuple(tuple);
+    TypesUtils.resetTuple(tupRecord1);
+    TypesUtils.resetTuple(tupColl1);
+    TypesUtils.resetTuple(tupColl2);
+    m1.clear();
+    bag1.clear();
+
+    row = 0;
+    tupColl1.set(0, "3.3");
+    tupColl1.set(1, "3.33");
+    bag1.add(tupColl1);
+    tupColl2.set(0, "3.333");
+    tupColl2.set(1, "3.3333");
+    bag1.add(tupColl2);
+    tuple2.set(0, bag1);
+
+    m1.put("k1", "k13");
+    m1.put("b", "b3");
+    m1.put("c", "c3");
+    tuple2.set(1, m1);
+
+    tupRecord1.set(0, "3");
+    tupRecord1.set(1, "hello3");
+    tuple2.set(2, tupRecord1);
+    tuple2.set(3, "world13");
+
+    inserter2.insert(new BytesWritable(String
+        .format("k%d%d", part + 1, row + 1).getBytes()), tuple2);
+
+    // second row
+    row++;
+    TypesUtils.resetTuple(tuple2);
+    TypesUtils.resetTuple(tupRecord1);
+    TypesUtils.resetTuple(tupColl1);
+    TypesUtils.resetTuple(tupColl2);
+    bag1.clear();
+    m1.clear();
+
+    tupColl1.set(0, "4.4");
+    tupColl1.set(1, "4.44");
+    bag1.add(tupColl1);
+    tupColl2.set(0, "4.444");
+    tupColl2.set(1, "4.4444");
+    bag1.add(tupColl2);
+    tuple2.set(0, bag1);
+
+    m1.put("k2", "k24");
+    m1.put("k3", "k34");
+    m1.put("k1", "k14");
+    m1.put("k4", "k44");
+    tuple2.set(1, m1);
+
+    tupRecord1.set(0, "4");
+    tupRecord1.set(1, "hello4");
+    tuple2.set(2, tupRecord1);
+    tuple2.set(3, "world4");
+
+    inserter2.insert(new BytesWritable(String
+        .format("k%d%d", part + 1, row + 1).getBytes()), tuple2);
+    inserter2.close();
+    writer2.finish();
+    writer22.close();
+
+  }
+
+  @AfterClass
+  public static void tearDownOnce() throws Exception {
+    pigServer.shutdown();
+  }
+
+  @Test
+  // all fields
+  public void testReader1() throws ExecException, IOException {
+    /*
+     * Remove the hdfs prefix part (e.g. "hdfs://localhost.localdomain:42540");
+     * Pig will fill that in.
+     */
+    String str1 = pathTable1.toString().substring(
+        pathTable1.toString().indexOf("/", 7), pathTable1.toString().length());
+    String str2 = pathTable2.toString().substring(
+        pathTable2.toString().indexOf("/", 7), pathTable2.toString().length());
+    String query = "records = LOAD '"
+        + str1
+        + ","
+        + str2
+        + "' USING org.apache.hadoop.zebra.pig.TableLoader('a,b#{k1|k2},c.f1');";
+    System.out.println(query);
+
+    pigServer.registerQuery(query);
+    Iterator<Tuple> it = pigServer.openIterator("records");
+
+    Tuple cur = null;
+    int i = 0;
+    int j = 0;
+    // total 4 lines
+    while (it.hasNext()) {
+      cur = it.next();
+
+      i++;
+      System.out.println(" line : " + i + " : " + cur.toString());
+      /*
+       * line : 1 : ({(3.3,3.33),(3.333,3.3333)},[k1#k13,k2#],3)
+       * line : 2 : ({(4.4,4.44),(4.444,4.4444)},[k1#k14,k2#k24],4)
+       */
+      // first line
+      Iterator<Tuple> it2 = ((DataBag) cur.get(0)).iterator();
+      while (it2.hasNext()) {
+
+        Tuple cur2 = it2.next();
+        j++;
+
+        if (j == 1) {
+          System.out.println("j is : " + j);
+          Assert.assertEquals(j + "." + j, cur2.get(0));
+          Assert.assertEquals(j + "." + j + j, cur2.get(1));
+        }
+        if (j == 2) {
+          System.out.println("j is : " + j);
+
+          Assert.assertEquals((j - 1) + "." + (j - 1) + (j - 1) + (j - 1), cur2
+              .get(0));
+          Assert.assertEquals((j - 1) + "." + (j - 1) + (j - 1) + (j - 1)
+              + (j - 1), cur2.get(1));
+        }
+
+        TypesUtils.resetTuple(cur2);
+
+      }// inner while
+      if (i == 1) {
+        System.out.println("i is : " + i);
+
+        Assert.assertEquals("k11", ((Map) cur.get(1)).get("k1"));
+        Assert.assertEquals(null, ((Map) cur.get(1)).get("k2"));
+        Assert.assertEquals("1", cur.get(2));
+      }
+
+      if (i == 2) {
+        System.out.println("i should see this line. ");
+        Assert.assertEquals("k12", ((Map) cur.get(1)).get("k1"));
+        Assert.assertEquals("k22", ((Map) cur.get(1)).get("k2"));
+        Assert.assertEquals("2", cur.get(2));
+      }
+      if (i == 3) {
+        System.out.println("i is : " + i);
+
+        Assert.assertEquals("k13", ((Map) cur.get(1)).get("k1"));
+        Assert.assertEquals(null, ((Map) cur.get(1)).get("k2"));
+        Assert.assertEquals("3", cur.get(2));
+      }
+
+      if (i == 4) {
+        System.out.println("i should see this line. ");
+        Assert.assertEquals("k14", ((Map) cur.get(1)).get("k1"));
+        Assert.assertEquals("k24", ((Map) cur.get(1)).get("k2"));
+        Assert.assertEquals("4", cur.get(2));
+      }
+    }// outer while
+
+    Assert.assertEquals(4, i);
+  }
+
+  @Test
+  // one common field only
+  public void testReader2() throws ExecException, IOException {
+    /*
+     * Remove the hdfs prefix part (e.g. "hdfs://localhost.localdomain:42540");
+     * Pig will fill that in.
+     */
+    String str1 = pathTable1.toString().substring(
+        pathTable1.toString().indexOf("/", 7), pathTable1.toString().length());
+    String str2 = pathTable2.toString().substring(
+        pathTable2.toString().indexOf("/", 7), pathTable2.toString().length());
+    String query = "records = LOAD '" + str1 + "," + str2
+        + "' USING org.apache.hadoop.zebra.pig.TableLoader('b#{k1}');";
+    System.out.println(query);
+
+    pigServer.registerQuery(query);
+    Iterator<Tuple> it = pigServer.openIterator("records");
+
+    Tuple cur = null;
+    int i = 0;
+    int j = 0;
+    // total 4 lines
+    while (it.hasNext()) {
+      cur = it.next();
+
+      i++;
+      System.out.println(" line : " + i + " : " + cur.toString());
+
+      // first line
+
+      if (i == 1) {
+        System.out.println("i is : " + i);
+
+        Assert.assertEquals("k11", ((Map) cur.get(0)).get("k1"));
+        Assert.assertEquals(null, ((Map) cur.get(0)).get("k2"));
+      }
+
+      if (i == 2) {
+        Assert.assertEquals("k12", ((Map) cur.get(0)).get("k1"));
+        Assert.assertEquals(null, ((Map) cur.get(0)).get("k2"));
+        try {
+          cur.get(1);
+          Assert.fail("should throw index out of bound excepiotn");
+        } catch (Exception e) {
+          e.printStackTrace();
+        }
+      }
+      if (i == 3) {
+        System.out.println("i is : " + i);
+
+        Assert.assertEquals("k13", ((Map) cur.get(0)).get("k1"));
+        Assert.assertEquals(null, ((Map) cur.get(0)).get("k2"));
+        try {
+          cur.get(1);
+          Assert.fail("should throw index out of bound excepiotn");
+        } catch (Exception e) {
+          e.printStackTrace();
+        }
+      }
+
+      if (i == 4) {
+        System.out.println("i should see this line. ");
+        Assert.assertEquals("k14", ((Map) cur.get(0)).get("k1"));
+        Assert.assertEquals(null, ((Map) cur.get(0)).get("k2"));
+        try {
+          cur.get(1);
+          Assert.fail("should throw index out of bound excepiotn");
+        } catch (Exception e) {
+          e.printStackTrace();
+        }
+      }
+    }// outer while
+
+    Assert.assertEquals(4, i);
+  }
+
+  @Test
+  // one field which exists in one table only
+  public void testReader3() throws ExecException, IOException {
+
+    String str1 = pathTable1.toString().substring(
+        pathTable1.toString().indexOf("/", 7), pathTable1.toString().length());
+    String str2 = pathTable2.toString().substring(
+        pathTable2.toString().indexOf("/", 7), pathTable2.toString().length());
+    String query = "records = LOAD '" + str1 + "," + str2
+        + "' USING org.apache.hadoop.zebra.pig.TableLoader('d');";
+    System.out.println(query);
+
+    pigServer.registerQuery(query);
+    Iterator<Tuple> it = pigServer.openIterator("records");
+
+    Tuple cur = null;
+    int i = 0;
+    while (it.hasNext()) {
+      cur = it.next();
+
+      i++;
+      System.out.println(" line : " + i + " : " + cur.toString());
+      if (i == 1) {
+        System.out.println("i is : " + i);
+
+        Assert.assertEquals("world1", cur.get(0));
+        try {
+          cur.get(1);
+          Assert.fail("should throw index out of bound excepiotn");
+        } catch (Exception e) {
+          e.printStackTrace();
+        }
+      }
+
+      if (i == 2) {
+
+        Assert.assertEquals("world2", cur.get(0));
+        try {
+          cur.get(1);
+          Assert.fail("should throw index out of bound excepiotn");
+        } catch (Exception e) {
+          e.printStackTrace();
+        }
+      }
+      if (i == 3) {
+
+        Assert.assertEquals(null, cur.get(0));
+        try {
+          cur.get(1);
+          Assert.fail("should throw index out of bound excepiotn");
+        } catch (Exception e) {
+          e.printStackTrace();
+        }
+      }
+
+      if (i == 4) {
+
+        Assert.assertEquals(null, cur.get(0));
+        try {
+          cur.get(1);
+          Assert.fail("should throw index out of bound excepiotn");
+        } catch (Exception e) {
+          e.printStackTrace();
+        }
+      }
+    }// outer while
+
+    Assert.assertEquals(4, i);
+  }
+
+}
\ No newline at end of file
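
The projection strings in these union tests pack three kinds of selectors
into one comma-separated list: a plain column name (a, or d in testReader3),
a map column restricted to a key subset (b#{k1|k2} keeps only keys k1 and k2;
a key absent from a row's map reads as null), and a record member reached by
dot notation (c.f1). A column present in only one table of the union (d)
reads as null for the other table's rows, and indexing past the projection
width throws, which testReader3 pins down. A condensed sketch of assembling
such a query, lifted from the tests above (str1 and str2 are the
scheme-stripped table paths computed there):

    String projection = "a,b#{k1|k2},c.f1"; // bag, two map keys, record field
    String query = "records = LOAD '" + str1 + "," + str2
        + "' USING org.apache.hadoop.zebra.pig.TableLoader('"
        + projection + "');";
    pigServer.registerQuery(query);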

Added: hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/types/TestCheckin.java
URL: http://svn.apache.org/viewvc/hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/types/TestCheckin.java?rev=803312&view=auto
==============================================================================
--- hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/types/TestCheckin.java (added)
+++ hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/types/TestCheckin.java Tue Aug 11 22:27:44 2009
@@ -0,0 +1,42 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.zebra.types;
+
+import org.junit.runner.RunWith;
+import org.junit.runners.Suite;
+ 
+@RunWith(Suite.class)
+@Suite.SuiteClasses({
+  TestSchemaCollection.class,
+  TestSchemaMap.class,
+  TestSchemaPrimitive.class,
+  TestSchemaRecord.class,
+  TestStorageCollection.class,
+  TestStorageMap.class,
+  TestStorageMisc1.class,
+  TestStorageMisc2.class,
+  TestStorageMisc3.class,
+  TestStorageRecord.class,
+  TestStorePrimitive.class
+})
+
+public class TestCheckin {
+  // the class remains completely empty, 
+  // being used only as a holder for the above annotations
+}
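
A @RunWith(Suite.class) holder like this runs the listed classes as a single
JUnit 4 suite. It can also be driven programmatically with the stock JUnit 4
runner; a minimal sketch:

    import org.junit.runner.JUnitCore;
    import org.junit.runner.Result;

    Result result = JUnitCore.runClasses(TestCheckin.class);
    System.out.println(result.getRunCount() + " run, "
        + result.getFailureCount() + " failed");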

Added: hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/types/TestSchemaCollection.java
URL: http://svn.apache.org/viewvc/hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/types/TestSchemaCollection.java?rev=803312&view=auto
==============================================================================
--- hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/types/TestSchemaCollection.java (added)
+++ hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/types/TestSchemaCollection.java Tue Aug 11 22:27:44 2009
@@ -0,0 +1,93 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.zebra.types;
+
+import java.io.StringReader;
+import junit.framework.Assert;
+
+import org.apache.hadoop.zebra.types.ColumnType;
+import org.apache.hadoop.zebra.types.ParseException;
+import org.apache.hadoop.zebra.types.Schema;
+import org.apache.hadoop.zebra.types.TableSchemaParser;
+import org.apache.hadoop.zebra.types.Schema.ColumnSchema;
+import org.junit.Test;
+
+public class TestSchemaCollection {
+  @Test
+  public void testSchemaValid1() throws ParseException {
+    String strSch = "c1:collection(f1:int, f2:int), c2:collection(c3:collection(f3:float, f4))";
+    TableSchemaParser parser;
+    Schema schema;
+
+    parser = new TableSchemaParser(new StringReader(strSch));
+    schema = parser.RecordSchema(null);
+    System.out.println(schema);
+
+    // test 1st level schema;
+    ColumnSchema f1 = schema.getColumn(0);
+    Assert.assertEquals("c1", f1.name);
+    Assert.assertEquals(ColumnType.COLLECTION, f1.type);
+
+    ColumnSchema f2 = schema.getColumn(1);
+    Assert.assertEquals("c2", f2.name);
+    Assert.assertEquals(ColumnType.COLLECTION, f2.type);
+
+    // test 2nd level schema;
+    Schema f1Schema = f1.schema;
+    ColumnSchema f11 = f1Schema.getColumn(0);
+    Assert.assertEquals("f1", f11.name);
+    Assert.assertEquals(ColumnType.INT, f11.type);
+    ColumnSchema f12 = f1Schema.getColumn(1);
+    Assert.assertEquals("f2", f12.name);
+    Assert.assertEquals(ColumnType.INT, f12.type);
+
+    Schema f2Schema = f2.schema;
+    ColumnSchema f21 = f2Schema.getColumn(0);
+    Assert.assertEquals("c3", f21.name);
+    Assert.assertEquals(ColumnType.COLLECTION, f21.type);
+
+    // test 3rd level schema;
+    Schema f21Schema = f21.schema;
+    ColumnSchema f211 = f21Schema.getColumn(0);
+    Assert.assertEquals("f3", f211.name);
+    Assert.assertEquals(ColumnType.FLOAT, f211.type);
+    ColumnSchema f212 = f21Schema.getColumn(1);
+    Assert.assertEquals("f4", f212.name);
+    Assert.assertEquals(ColumnType.BYTES, f212.type);
+  }
+
+  @Test
+  public void testSchemaInvalid1() throws ParseException, Exception {
+    try {
+      String strSch = "c1:collection(f1:int, f2:int), c2:collection(c3:collection(f3:float, f4)))";
+      TableSchemaParser parser;
+      Schema schema;
+
+      parser = new TableSchemaParser(new StringReader(strSch));
+      schema = parser.RecordSchema(null);
+      System.out.println(schema);
+    } catch (Exception e) {
+      String errMsg = e.getMessage();
+      String str = "Encountered \" \")\" \") \"\" at line 1, column 74.";
+      System.out.println(errMsg);
+      System.out.println(str);
+      Assert.assertEquals(errMsg.startsWith(str), true);
+    }
+  }
+}
\ No newline at end of file

Added: hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/types/TestSchemaMap.java
URL: http://svn.apache.org/viewvc/hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/types/TestSchemaMap.java?rev=803312&view=auto
==============================================================================
--- hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/types/TestSchemaMap.java (added)
+++ hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/types/TestSchemaMap.java Tue Aug 11 22:27:44 2009
@@ -0,0 +1,216 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.zebra.types;
+
+import java.io.StringReader;
+import junit.framework.Assert;
+
+import org.apache.hadoop.zebra.types.ColumnType;
+import org.apache.hadoop.zebra.types.ParseException;
+import org.apache.hadoop.zebra.types.Schema;
+import org.apache.hadoop.zebra.types.TableSchemaParser;
+import org.apache.hadoop.zebra.types.Schema.ColumnSchema;
+import org.junit.Test;
+
+public class TestSchemaMap {
+  @Test
+  public void testSchemaValid1() throws ParseException {
+    String strSch = "f1:int, m1:map(int)";
+    TableSchemaParser parser;
+    Schema schema;
+
+    parser = new TableSchemaParser(new StringReader(strSch));
+    schema = parser.RecordSchema(null);
+    System.out.println(schema);
+
+    // test 1st level schema;
+    ColumnSchema f1 = schema.getColumn(0);
+    Assert.assertEquals("f1", f1.name);
+    Assert.assertEquals(ColumnType.INT, f1.type);
+
+    ColumnSchema f2 = schema.getColumn(1);
+    Assert.assertEquals("m1", f2.name);
+    Assert.assertEquals(ColumnType.MAP, f2.type);
+  }
+
+  @Test
+  public void testSchemaValid2() throws ParseException {
+    String strSch = "f1:int, m1:map(map(float))";
+    TableSchemaParser parser;
+    Schema schema;
+
+    parser = new TableSchemaParser(new StringReader(strSch));
+    schema = parser.RecordSchema(null);
+    System.out.println(schema);
+
+    // test 1st level schema;
+    ColumnSchema f1 = schema.getColumn(0);
+    Assert.assertEquals("f1", f1.name);
+    Assert.assertEquals(ColumnType.INT, f1.type);
+
+    ColumnSchema f2 = schema.getColumn(1);
+    Assert.assertEquals("m1", f2.name);
+    Assert.assertEquals(ColumnType.MAP, f2.type);
+
+    // test 2nd level schema;
+    Schema f2Schema = f2.schema;
+    ColumnSchema f21 = f2Schema.getColumn(0);
+    // Assert.assertEquals("m1", f2.name);
+    Assert.assertEquals(ColumnType.MAP, f21.type);
+
+    // test 3rd level schema;
+    Schema f21Schema = f21.schema;
+    ColumnSchema f211 = f21Schema.getColumn(0);
+    // Assert.assertEquals("m1", f2.name);
+    Assert.assertEquals(ColumnType.FLOAT, f211.type);
+  }
+
+  @Test
+  public void testSchemaValid3() throws ParseException {
+    String strSch = "m1:map(map(float)), m2:map(bool), f3";
+    TableSchemaParser parser;
+    Schema schema;
+
+    parser = new TableSchemaParser(new StringReader(strSch));
+    schema = parser.RecordSchema(null);
+    System.out.println(schema);
+
+    // test 1st level schema;
+    ColumnSchema f1 = schema.getColumn(0);
+    Assert.assertEquals("m1", f1.name);
+    Assert.assertEquals(ColumnType.MAP, f1.type);
+
+    ColumnSchema f2 = schema.getColumn(1);
+    Assert.assertEquals("m2", f2.name);
+    Assert.assertEquals(ColumnType.MAP, f2.type);
+
+    ColumnSchema f3 = schema.getColumn(2);
+    Assert.assertEquals("f3", f3.name);
+    Assert.assertEquals(ColumnType.BYTES, f3.type);
+
+    // test 2nd level schema;
+    Schema f1Schema = f1.schema;
+    ColumnSchema f11 = f1Schema.getColumn(0);
+    Assert.assertEquals(ColumnType.MAP, f11.type);
+
+    Schema f2Schema = f2.schema;
+    ColumnSchema f21 = f2Schema.getColumn(0);
+    Assert.assertEquals(ColumnType.BOOL, f21.type);
+
+    // test 3rd level schema;
+    Schema f11Schema = f11.schema;
+    ColumnSchema f111 = f11Schema.getColumn(0);
+    Assert.assertEquals(ColumnType.FLOAT, f111.type);
+  }
+
+  @Test
+  public void testSchemaInvalid1() throws ParseException {
+    try {
+      String strSch = "m1:abc";
+      TableSchemaParser parser;
+      Schema schema;
+
+      parser = new TableSchemaParser(new StringReader(strSch));
+      schema = parser.RecordSchema(null);
+      System.out.println(schema);
+    } catch (Exception e) {
+      String errMsg = e.getMessage();
+      String str = "Encountered \" <IDENTIFIER> \"abc \"\" at line 1, column 4.";
+      System.out.println(errMsg);
+      System.out.println(str);
+      Assert.assertEquals(errMsg.startsWith(str), true);
+    }
+  }
+
+  @Test
+  public void testSchemaInvalid2() throws ParseException {
+    try {
+      String strSch = "m1:map(int";
+      TableSchemaParser parser;
+      Schema schema;
+
+      parser = new TableSchemaParser(new StringReader(strSch));
+      schema = parser.RecordSchema(null);
+      System.out.println(schema);
+    } catch (Exception e) {
+      String errMsg = e.getMessage();
+      String str = "Encountered \" \"(\" \"( \"\" at line 1, column 7.";
+      System.out.println(errMsg);
+      System.out.println(str);
+      Assert.assertEquals(errMsg.startsWith(str), true);
+    }
+  }
+
+  @Test
+  public void testSchemaInvalid3() throws ParseException {
+    try {
+      String strSch = "m1:map(int, f2:int";
+      TableSchemaParser parser;
+      Schema schema;
+
+      parser = new TableSchemaParser(new StringReader(strSch));
+      schema = parser.RecordSchema(null);
+      System.out.println(schema);
+    } catch (Exception e) {
+      String errMsg = e.getMessage();
+      String str = "Encountered \" \"(\" \"( \"\" at line 1, column 7.";
+      System.out.println(errMsg);
+      System.out.println(str);
+      Assert.assertEquals(errMsg.startsWith(str), true);
+    }
+  }
+
+  @Test
+  public void testSchemaInvalid4() throws ParseException {
+    try {
+      String strSch = "m1:map(m2:int)";
+      TableSchemaParser parser;
+      Schema schema;
+
+      parser = new TableSchemaParser(new StringReader(strSch));
+      schema = parser.RecordSchema(null);
+      System.out.println(schema);
+    } catch (Exception e) {
+      String errMsg = e.getMessage();
+      String str = "Encountered \" \"(\" \"( \"\" at line 1, column 7.";
+      System.out.println(errMsg);
+      System.out.println(str);
+      Assert.assertEquals(errMsg.startsWith(str), true);
+    }
+  }
+
+  @Test
+  public void testSchemaInvalid5() throws ParseException {
+    try {
+      String strSch = "m1:map(abc)";
+      TableSchemaParser parser;
+      Schema schema;
+
+      parser = new TableSchemaParser(new StringReader(strSch));
+      schema = parser.RecordSchema(null);
+      System.out.println(schema);
+    } catch (Exception e) {
+      String errMsg = e.getMessage();
+      String str = "Encountered \" \"(\" \"( \"\" at line 1, column 7.";
+      System.out.println(errMsg);
+      System.out.println(str);
+      Assert.assertEquals(errMsg.startsWith(str), true);
+    }
+  }
+}
\ No newline at end of file

Added: hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/types/TestSchemaPrimitive.java
URL: http://svn.apache.org/viewvc/hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/types/TestSchemaPrimitive.java?rev=803312&view=auto
==============================================================================
--- hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/types/TestSchemaPrimitive.java (added)
+++ hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/types/TestSchemaPrimitive.java Tue Aug 11 22:27:44 2009
@@ -0,0 +1,156 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.zebra.types;
+
+import java.io.StringReader;
+import junit.framework.Assert;
+
+import org.apache.hadoop.zebra.types.ColumnType;
+import org.apache.hadoop.zebra.types.ParseException;
+import org.apache.hadoop.zebra.types.Schema;
+import org.apache.hadoop.zebra.types.TableSchemaParser;
+import org.apache.hadoop.zebra.types.Schema.ColumnSchema;
+import org.junit.Test;
+
+public class TestSchemaPrimitive {
+  @Test
+  public void testSchemaValid1() throws ParseException {
+    String strSch = "f1:int, f2:long, f3:float, f4:bool, f5:string, f6:bytes";
+    TableSchemaParser parser;
+    Schema schema;
+
+    parser = new TableSchemaParser(new StringReader(strSch));
+    schema = parser.RecordSchema(null);
+    System.out.println(schema);
+
+    ColumnSchema f2 = schema.getColumn(1);
+    Assert.assertEquals("f2", f2.name);
+    Assert.assertEquals(ColumnType.LONG, f2.type);
+  }
+
+  @Test
+  public void testSchemaValid2() throws ParseException {
+    String strSch = "f1:int, f2, f3:float, f4, f5:string, f6:bytes";
+    TableSchemaParser parser;
+    Schema schema;
+
+    parser = new TableSchemaParser(new StringReader(strSch));
+    schema = parser.RecordSchema(null);
+    System.out.println(schema);
+
+    ColumnSchema f2 = schema.getColumn(1);
+    Assert.assertEquals("f2", f2.name);
+    Assert.assertEquals(ColumnType.BYTES, f2.type);
+
+    ColumnSchema f4 = schema.getColumn(3);
+    Assert.assertEquals("f4", f4.name);
+    Assert.assertEquals(ColumnType.BYTES, f4.type);
+  }
+
+  /*
+   * @Test
+   * public void testSchemaValid3() throws ParseException {
+   *   try {
+   *     String strSch = ",f1:int";
+   *     TableSchemaParser parser;
+   *     Schema schema;
+   *
+   *     parser = new TableSchemaParser(new StringReader(strSch));
+   *     schema = parser.RecordSchema(null);
+   *     System.out.println(schema);
+   *
+   *     ColumnSchema f1 = schema.getColumn(0);
+   *     // Assert.assertEquals("f2", f2.name);
+   *     // Assert.assertEquals(ColumnType.BYTES, f2.type);
+   *
+   *     ColumnSchema f2 = schema.getColumn(1);
+   *     // Assert.assertEquals("f1", f4.name);
+   *     // Assert.assertEquals(ColumnType.BYTES, f4.type);
+   *   } catch (Exception e) {
+   *     System.out.println(e.getMessage());
+   *   }
+   * }
+   */
+
+  @Test
+  public void testSchemaInvalid1() {
+    try {
+      String strSch = "f1:int, f2:xyz, f3:float";
+      TableSchemaParser parser;
+      Schema schema;
+
+      parser = new TableSchemaParser(new StringReader(strSch));
+      schema = parser.RecordSchema(null);
+      System.out.println(schema);
+    } catch (Exception e) {
+      String errMsg = e.getMessage();
+      String str = "Encountered \" <IDENTIFIER> \"xyz \"\" at line 1, column 12.";
+      System.out.println(errMsg);
+      System.out.println(str);
+      Assert.assertEquals(errMsg.startsWith(str), true);
+    }
+  }
+
+  @Test
+  public void testSchemaInvalid2() {
+    try {
+      String strSch = "f1:";
+      TableSchemaParser parser;
+      Schema schema;
+
+      parser = new TableSchemaParser(new StringReader(strSch));
+      schema = parser.RecordSchema(null);
+      System.out.println(schema);
+    } catch (Exception e) {
+      String errMsg = e.getMessage();
+      String str = "Encountered \"<EOF>\" at line 1, column 3.";
+      System.out.println(errMsg);
+      System.out.println(str);
+      Assert.assertEquals(errMsg.startsWith(str), true);
+    }
+  }
+
+  @Test
+  public void testSchemaInvalid3() {
+    try {
+      String strSch = ":";
+      TableSchemaParser parser;
+      Schema schema;
+
+      parser = new TableSchemaParser(new StringReader(strSch));
+      schema = parser.RecordSchema(null);
+      System.out.println(schema);
+    } catch (Exception e) {
+      String errMsg = e.getMessage();
+      String str = "Encountered \" \":\" \": \"\" at line 1, column 1.";
+      System.out.println(errMsg);
+      System.out.println(str);
+      Assert.assertEquals(errMsg.startsWith(str), true);
+    }
+  }
+
+  @Test
+  public void testSchemaInvalid4() {
+    try {
+      String strSch = "f1:int abc";
+      TableSchemaParser parser;
+      Schema schema;
+
+      parser = new TableSchemaParser(new StringReader(strSch));
+      schema = parser.RecordSchema(null);
+      System.out.println(schema);
+    } catch (Exception e) {
+      String errMsg = e.getMessage();
+      String str = "Encountered \" <IDENTIFIER> \"abc \"\" at line 1, column 8.";
+      System.out.println(errMsg);
+      System.out.println(str);
+      Assert.assertEquals(errMsg.startsWith(str), true);
+    }
+  }
+}
\ No newline at end of file

Added: hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/types/TestSchemaRecord.java
URL: http://svn.apache.org/viewvc/hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/types/TestSchemaRecord.java?rev=803312&view=auto
==============================================================================
--- hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/types/TestSchemaRecord.java (added)
+++ hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/types/TestSchemaRecord.java Tue Aug 11 22:27:44 2009
@@ -0,0 +1,86 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.zebra.types;
+
+import java.io.StringReader;
+import junit.framework.Assert;
+
+import org.apache.hadoop.zebra.types.ColumnType;
+import org.apache.hadoop.zebra.types.ParseException;
+import org.apache.hadoop.zebra.types.Schema;
+import org.apache.hadoop.zebra.types.TableSchemaParser;
+import org.apache.hadoop.zebra.types.Schema.ColumnSchema;
+import org.junit.Test;
+
+public class TestSchemaRecord {
+  @Test
+  public void testSchemaValid1() throws ParseException {
+    String strSch = "r1:record(f1:int, f2:int), r2:record(r3:record(f3:float, f4))";
+    TableSchemaParser parser;
+    Schema schema;
+
+    parser = new TableSchemaParser(new StringReader(strSch));
+    schema = parser.RecordSchema(null);
+    System.out.println(schema);
+
+    // test 1st level schema;
+    ColumnSchema f1 = schema.getColumn(0);
+    Assert.assertEquals("r1", f1.name);
+    Assert.assertEquals(ColumnType.RECORD, f1.type);
+
+    ColumnSchema f2 = schema.getColumn(1);
+    Assert.assertEquals("r2", f2.name);
+    Assert.assertEquals(ColumnType.RECORD, f2.type);
+
+    // test 2nd level schema;
+    Schema f1Schema = f1.schema;
+    ColumnSchema f11 = f1Schema.getColumn(0);
+    Assert.assertEquals("f1", f11.name);
+    Assert.assertEquals(ColumnType.INT, f11.type);
+    ColumnSchema f12 = f1Schema.getColumn(1);
+    Assert.assertEquals("f2", f12.name);
+    Assert.assertEquals(ColumnType.INT, f12.type);
+
+    Schema f2Schema = f2.schema;
+    ColumnSchema f21 = f2Schema.getColumn(0);
+    Assert.assertEquals("r3", f21.name);
+    Assert.assertEquals(ColumnType.RECORD, f21.type);
+
+    // test 3rd level schema;
+    Schema f21Schema = f21.schema;
+    ColumnSchema f211 = f21Schema.getColumn(0);
+    Assert.assertEquals("f3", f211.name);
+    Assert.assertEquals(ColumnType.FLOAT, f211.type);
+    ColumnSchema f212 = f21Schema.getColumn(1);
+    Assert.assertEquals("f4", f212.name);
+    Assert.assertEquals(ColumnType.BYTES, f212.type);
+  }
+
+  /*
+   * @Test
+   * public void testSchemaInvalid1() throws ParseException {
+   *   try {
+   *     String strSch = "m1:abc";
+   *     TableSchemaParser parser;
+   *     Schema schema;
+   *
+   *     parser = new TableSchemaParser(new StringReader(strSch));
+   *     schema = parser.RecordSchema();
+   *     System.out.println(schema);
+   *   } catch (Exception e) {
+   *     String errMsg = e.getMessage();
+   *     String str = "Encountered \" <IDENTIFIER> \"abc \"\" at line 1, column 4.";
+   *     System.out.println(errMsg);
+   *     System.out.println(str);
+   *     Assert.assertEquals(errMsg.startsWith(str), true);
+   *   }
+   * }
+   */
+}

Added: hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/types/TestStorageCollection.java
URL: http://svn.apache.org/viewvc/hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/types/TestStorageCollection.java?rev=803312&view=auto
==============================================================================
--- hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/types/TestStorageCollection.java (added)
+++ hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/types/TestStorageCollection.java Tue Aug 11 22:27:44 2009
@@ -0,0 +1,174 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.zebra.types;
+
+import java.io.StringReader;
+import java.util.Map;
+import java.util.HashSet;
+import java.util.Iterator;
+import junit.framework.Assert;
+
+import org.apache.hadoop.zebra.types.CGSchema;
+import org.apache.hadoop.zebra.types.ColumnType;
+import org.apache.hadoop.zebra.types.ParseException;
+import org.apache.hadoop.zebra.types.Partition;
+import org.apache.hadoop.zebra.types.Schema;
+import org.apache.hadoop.zebra.types.TableSchemaParser;
+import org.apache.hadoop.zebra.types.Schema.ColumnSchema;
+import org.junit.Before;
+import org.junit.Test;
+
+public class TestStorageCollection {
+  String strSch = "c1:collection(f1:int, f2:int), c2:collection(c3:collection(f3:float, f4))";
+  TableSchemaParser parser;
+  Schema schema;
+
+  @Before
+  public void init() throws ParseException {
+    parser = new TableSchemaParser(new StringReader(strSch));
+    schema = parser.RecordSchema(null);
+  }
+
+  @Test
+  public void testStorageValid1() {
+    try {
+      String strStorage = "[c1]; [c2]";
+      Partition p = new Partition(schema.toString(), strStorage);
+      CGSchema[] cgschemas = p.getCGSchemas();
+
+      // 2 column group;
+      int size = cgschemas.length;
+      Assert.assertEquals(size, 2);
+      System.out.println("********** Column Groups **********");
+      for (int i = 0; i < cgschemas.length; i++) {
+        System.out.println(cgschemas[i]);
+        System.out.println("--------------------------------");
+      }
+      CGSchema cgs1 = cgschemas[0];
+      CGSchema cgs2 = cgschemas[1];
+
+      ColumnSchema f11 = cgs1.getSchema().getColumn(0);
+      Assert.assertEquals("c1", f11.name);
+      Assert.assertEquals(ColumnType.COLLECTION, f11.type);
+      ColumnSchema f21 = cgs2.getSchema().getColumn(0);
+      Assert.assertEquals("c2", f21.name);
+      Assert.assertEquals(ColumnType.COLLECTION, f21.type);
+
+      System.out.println("*********** Column Map **********");
+      Map<String, HashSet<Partition.PartitionInfo.ColumnMappingEntry>> colmap = p
+          .getPartitionInfo().getColMap();
+      Assert.assertEquals(colmap.size(), 2);
+      Iterator<Map.Entry<String, HashSet<Partition.PartitionInfo.ColumnMappingEntry>>> it = colmap
+          .entrySet().iterator();
+      for (int i = 0; i < colmap.size(); i++) {
+        Map.Entry<String, HashSet<Partition.PartitionInfo.ColumnMappingEntry>> entry = (Map.Entry<String, HashSet<Partition.PartitionInfo.ColumnMappingEntry>>) it
+            .next();
+        String name = entry.getKey();
+        HashSet<Partition.PartitionInfo.ColumnMappingEntry> hs = entry
+            .getValue();
+        Iterator<Partition.PartitionInfo.ColumnMappingEntry> it1 = hs
+            .iterator();
+        while (it1.hasNext()) {
+          Partition.PartitionInfo.ColumnMappingEntry cme = (Partition.PartitionInfo.ColumnMappingEntry) it1
+              .next();
+          System.out.println("[Column = " + name + " CG = " + cme.getCGIndex()
+              + "." + cme.getFieldIndex() + "]");
+          if (i == 0) {
+            Assert.assertEquals(name, "c1");
+            Assert.assertEquals(cme.getCGIndex(), 0);
+            Assert.assertEquals(cme.getFieldIndex(), 0);
+          } else if (i == 1) {
+            Assert.assertEquals(name, "c2");
+            Assert.assertEquals(cme.getCGIndex(), 1);
+            Assert.assertEquals(cme.getFieldIndex(), 0);
+          }
+        }
+      }
+    } catch (Exception e) {
+      Assert.assertTrue(false);
+    }
+  }
+
+  @Test
+  public void testStorageValid2() {
+    try {
+      String strStorage = "[c1.f1]";
+      Partition p = new Partition(schema.toString(), strStorage);
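+      // "[c1.f1]" tries to split collection c1 across column groups;
+      // the Partition constructor is expected to throw, so control
+      // jumps to the catch block and the code below is unreachable.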
+      Assert.fail("Partition construction was expected to fail");
+      CGSchema[] cgschemas = p.getCGSchemas();
+
+      // 2 column groups;
+      int size = cgschemas.length;
+      Assert.assertEquals(size, 2);
+      System.out.println("********** Column Groups **********");
+      for (int i = 0; i < cgschemas.length; i++) {
+        System.out.println(cgschemas[i]);
+        System.out.println("--------------------------------");
+      }
+      CGSchema cgs1 = cgschemas[0];
+      CGSchema cgs2 = cgschemas[1];
+
+      ColumnSchema f11 = cgs1.getSchema().getColumn(0);
+      Assert.assertEquals("c1.f1", f11.name);
+      Assert.assertEquals(ColumnType.INT, f11.type);
+      ColumnSchema f21 = cgs2.getSchema().getColumn(0);
+      Assert.assertEquals("c1.f2", f21.name);
+      Assert.assertEquals(ColumnType.INT, f21.type);
+      ColumnSchema f22 = cgs2.getSchema().getColumn(1);
+      Assert.assertEquals("c2", f22.name);
+      Assert.assertEquals(ColumnType.COLLECTION, f22.type);
+
+      System.out.println("*********** Column Map **********");
+      Map<String, HashSet<Partition.PartitionInfo.ColumnMappingEntry>> colmap = p
+          .getPartitionInfo().getColMap();
+      Assert.assertEquals(colmap.size(), 3);
+      Iterator<Map.Entry<String, HashSet<Partition.PartitionInfo.ColumnMappingEntry>>> it = colmap
+          .entrySet().iterator();
+      for (int i = 0; i < colmap.size(); i++) {
+        Map.Entry<String, HashSet<Partition.PartitionInfo.ColumnMappingEntry>> entry = (Map.Entry<String, HashSet<Partition.PartitionInfo.ColumnMappingEntry>>) it
+            .next();
+        String name = entry.getKey();
+        HashSet<Partition.PartitionInfo.ColumnMappingEntry> hs = entry
+            .getValue();
+        Iterator<Partition.PartitionInfo.ColumnMappingEntry> it1 = hs
+            .iterator();
+        for (int j = 0; j < hs.size(); j++) {
+          Partition.PartitionInfo.ColumnMappingEntry cme = (Partition.PartitionInfo.ColumnMappingEntry) it1
+              .next();
+          System.out.println("[Column = " + name + " CG = " + cme.getCGIndex()
+              + "." + cme.getFieldIndex() + "]");
+          if (i == 0 && j == 0) {
+            Assert.assertEquals(name, "c1.f2");
+            Assert.assertEquals(cme.getCGIndex(), 1);
+            Assert.assertEquals(cme.getFieldIndex(), 0);
+          } else if (i == 1 && j == 0) {
+            Assert.assertEquals(name, "c1.f1");
+            Assert.assertEquals(cme.getCGIndex(), 0);
+            Assert.assertEquals(cme.getFieldIndex(), 0);
+          } else if (i == 2 && j == 0) {
+            Assert.assertEquals(name, "c2");
+            Assert.assertEquals(cme.getCGIndex(), 1);
+            Assert.assertEquals(cme.getFieldIndex(), 1);
+          }
+        }
+      }
+    } catch (Exception e) {
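+      // expected: a collection cannot be split across column groups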
+    }
+  }
+}

Added: hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/types/TestStorageMap.java
URL: http://svn.apache.org/viewvc/hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/types/TestStorageMap.java?rev=803312&view=auto
==============================================================================
--- hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/types/TestStorageMap.java (added)
+++ hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/types/TestStorageMap.java Tue Aug 11 22:27:44 2009
@@ -0,0 +1,271 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.zebra.types;
+
+import java.io.StringReader;
+import java.util.Iterator;
+import java.util.Map;
+import java.util.HashSet;
+import java.util.TreeSet;
+import junit.framework.Assert;
+
+import org.apache.hadoop.zebra.types.CGSchema;
+import org.apache.hadoop.zebra.types.ColumnType;
+import org.apache.hadoop.zebra.types.ParseException;
+import org.apache.hadoop.zebra.types.Partition;
+import org.apache.hadoop.zebra.types.Schema;
+import org.apache.hadoop.zebra.types.TableSchemaParser;
+import org.apache.hadoop.zebra.types.Schema.ColumnSchema;
+import org.junit.Before;
+import org.junit.Test;
+
+public class TestStorageMap {
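+  // Table schema under test: m1 is a map of maps of float, m2 a map of
+  // bool, f3 a plain int.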
+  String strSch = "m1:map(map(float)), m2:map(bool), f3:int";
+  TableSchemaParser parser;
+  Schema schema;
+
+  @Before
+  public void init() throws ParseException {
+    parser = new TableSchemaParser(new StringReader(strSch));
+    schema = parser.RecordSchema(null);
+  }
+
+  @Test
+  public void testStorageValid1() {
+    try {
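+      // Key projections: CG0 holds key k1 of m1, CG1 holds key k1 of m2
+      // plus f3; the unprojected remainders of m1 and m2 fall into a
+      // third, default column group, hence the three CGSchemas below.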
+      String strStorage = "[m1#{k1}]; [m2#{k1}, f3]";
+      Partition p = new Partition(schema.toString(), strStorage);
+      CGSchema[] cgschemas = p.getCGSchemas();
+
+      // 3 column groups;
+      int size = cgschemas.length;
+      Assert.assertEquals(size, 3);
+      System.out.println("********** Column Groups **********");
+      for (int i = 0; i < cgschemas.length; i++) {
+        System.out.println(cgschemas[i]);
+        System.out.println("--------------------------------");
+      }
+      CGSchema cgs1 = cgschemas[0];
+      CGSchema cgs2 = cgschemas[1];
+      CGSchema cgs3 = cgschemas[2];
+
+      ColumnSchema f11 = cgs1.getSchema().getColumn(0);
+      Assert.assertEquals(f11.name, "m1");
+      Assert.assertEquals(ColumnType.MAP, f11.type);
+
+      ColumnSchema f21 = cgs2.getSchema().getColumn(0);
+      Assert.assertEquals(f21.name, "m2");
+      // only key k1 of m2 lives in this group, but the column type is
+      // still MAP
+      Assert.assertEquals(ColumnType.MAP, f21.type);
+
+      ColumnSchema f22 = cgs2.getSchema().getColumn(1);
+      Assert.assertEquals(f22.name, "f3");
+      Assert.assertEquals(ColumnType.INT, f22.type);
+      ColumnSchema f31 = cgs3.getSchema().getColumn(0);
+      Assert.assertEquals(f31.name, "m1");
+      Assert.assertEquals(ColumnType.MAP, f31.type);
+      ColumnSchema f32 = cgs3.getSchema().getColumn(1);
+      Assert.assertEquals(f32.name, "m2");
+      Assert.assertEquals(ColumnType.MAP, f32.type);
+
+      System.out.println("*********** Column Map **********");
+      Map<String, HashSet<Partition.PartitionInfo.ColumnMappingEntry>> colmap = p
+          .getPartitionInfo().getColMap();
+      Assert.assertEquals(colmap.size(), 3);
+      Iterator<Map.Entry<String, HashSet<Partition.PartitionInfo.ColumnMappingEntry>>> it = colmap
+          .entrySet().iterator();
+      for (int i = 0; i < colmap.size(); i++) {
+        Map.Entry<String, HashSet<Partition.PartitionInfo.ColumnMappingEntry>> entry = (Map.Entry<String, HashSet<Partition.PartitionInfo.ColumnMappingEntry>>) it
+            .next();
+        String name = entry.getKey();
+        HashSet<Partition.PartitionInfo.ColumnMappingEntry> hs = entry
+            .getValue();
+        TreeSet<Partition.PartitionInfo.ColumnMappingEntry> ts = new TreeSet<Partition.PartitionInfo.ColumnMappingEntry>(
+            hs);
+        Iterator<Partition.PartitionInfo.ColumnMappingEntry> it1 = ts
+            .iterator();
+        for (int j = 0; j < ts.size(); j++) {
+          Partition.PartitionInfo.ColumnMappingEntry cme = (Partition.PartitionInfo.ColumnMappingEntry) it1
+              .next();
+          System.out.println("[Column = " + name + " CG = " + cme.getCGIndex()
+              + "." + cme.getFieldIndex() + "]");
+          if (i == 0 && j == 0) {
+            Assert.assertEquals(name, "m1");
+            Assert.assertEquals(cme.getCGIndex(), 0);
+            Assert.assertEquals(cme.getFieldIndex(), 0);
+          } else if (i == 0 && j == 1) {
+            Assert.assertEquals(name, "m1");
+            Assert.assertEquals(cme.getCGIndex(), 2);
+            Assert.assertEquals(cme.getFieldIndex(), 0);
+          } else if (i == 1 && j == 0) {
+            Assert.assertEquals(name, "m2");
+            Assert.assertEquals(cme.getCGIndex(), 1);
+            Assert.assertEquals(cme.getFieldIndex(), 0);
+          } else if (i == 1 && j == 1) {
+            Assert.assertEquals(name, "m2");
+            Assert.assertEquals(cme.getCGIndex(), 2);
+            Assert.assertEquals(cme.getFieldIndex(), 1);
+          } else if (i == 2 && j == 0) {
+            Assert.assertEquals(name, "f3");
+            Assert.assertEquals(cme.getCGIndex(), 1);
+            Assert.assertEquals(cme.getFieldIndex(), 1);
+          }
+        }
+      }
+    } catch (Exception e) {
+      Assert.fail("unexpected exception: " + e.getMessage());
+    }
+  }
+
+  @Test
+  public void testStorageValid2() {
+    try {
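+      // Two keys of the same map go to different groups: m1#{k1} in CG0,
+      // m1#{k2} and f3 in CG1; the rest of m1 plus all of m2 land in the
+      // default CG2.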
+      String strStorage = "[m1#{k1}]; [m1#{k2}, f3]";
+      Partition p = new Partition(schema.toString(), strStorage);
+      CGSchema[] cgschemas = p.getCGSchemas();
+
+      // 3 column groups;
+      int size = cgschemas.length;
+      Assert.assertEquals(size, 3);
+      System.out.println("********** Column Groups **********");
+      for (int i = 0; i < cgschemas.length; i++) {
+        System.out.println(cgschemas[i]);
+        System.out.println("--------------------------------");
+      }
+      CGSchema cgs1 = cgschemas[0];
+      CGSchema cgs2 = cgschemas[1];
+      CGSchema cgs3 = cgschemas[2];
+
+      ColumnSchema f11 = cgs1.getSchema().getColumn(0);
+      Assert.assertEquals(f11.name, "m1");
+      Assert.assertEquals(ColumnType.MAP, f11.type);
+      ColumnSchema f21 = cgs2.getSchema().getColumn(0);
+      Assert.assertEquals(f21.name, "m1");
+      Assert.assertEquals(ColumnType.MAP, f21.type);
+      ColumnSchema f22 = cgs2.getSchema().getColumn(1);
+      Assert.assertEquals(f22.name, "f3");
+      Assert.assertEquals(ColumnType.INT, f22.type);
+      ColumnSchema f31 = cgs3.getSchema().getColumn(0);
+      Assert.assertEquals(f31.name, "m1");
+      Assert.assertEquals(ColumnType.MAP, f31.type);
+      ColumnSchema f32 = cgs3.getSchema().getColumn(1);
+      Assert.assertEquals(f32.name, "m2");
+      Assert.assertEquals(ColumnType.MAP, f32.type);
+
+      System.out.println("*********** Column Map **********");
+      Map<String, HashSet<Partition.PartitionInfo.ColumnMappingEntry>> colmap = p
+          .getPartitionInfo().getColMap();
+      Assert.assertEquals(colmap.size(), 3);
+      Iterator<Map.Entry<String, HashSet<Partition.PartitionInfo.ColumnMappingEntry>>> it = colmap
+          .entrySet().iterator();
+      for (int i = 0; i < colmap.size(); i++) {
+        Map.Entry<String, HashSet<Partition.PartitionInfo.ColumnMappingEntry>> entry = (Map.Entry<String, HashSet<Partition.PartitionInfo.ColumnMappingEntry>>) it
+            .next();
+        String name = entry.getKey();
+        HashSet<Partition.PartitionInfo.ColumnMappingEntry> hs = entry
+            .getValue();
+        TreeSet<Partition.PartitionInfo.ColumnMappingEntry> ts = new TreeSet<Partition.PartitionInfo.ColumnMappingEntry>(
+            hs);
+        Iterator<Partition.PartitionInfo.ColumnMappingEntry> it1 = ts
+            .iterator();
+        for (int j = 0; j < ts.size(); j++) {
+          Partition.PartitionInfo.ColumnMappingEntry cme = (Partition.PartitionInfo.ColumnMappingEntry) it1
+              .next();
+          System.out.println("[Column = " + name + " CG = " + cme.getCGIndex()
+              + "." + cme.getFieldIndex() + "]");
+          if (i == 0 && j == 0) {
+            Assert.assertEquals(name, "m1");
+            Assert.assertEquals(cme.getCGIndex(), 0);
+            Assert.assertEquals(cme.getFieldIndex(), 0);
+          } else if (i == 0 && j == 1) {
+            Assert.assertEquals(name, "m1");
+            Assert.assertEquals(cme.getCGIndex(), 1);
+            Assert.assertEquals(cme.getFieldIndex(), 0);
+          } else if (i == 0 && j == 2) {
+            Assert.assertEquals(name, "m1");
+            Assert.assertEquals(cme.getCGIndex(), 2);
+            Assert.assertEquals(cme.getFieldIndex(), 0);
+          } else if (i == 1 && j == 0) {
+            Assert.assertEquals(name, "m2");
+            Assert.assertEquals(cme.getCGIndex(), 2);
+            Assert.assertEquals(cme.getFieldIndex(), 1);
+          } else if (i == 2 && j == 0) {
+            Assert.assertEquals(name, "f3");
+            Assert.assertEquals(cme.getCGIndex(), 1);
+            Assert.assertEquals(cme.getFieldIndex(), 1);
+          }
+        }
+      }
+    } catch (Exception e) {
+      Assert.fail("unexpected exception: " + e.getMessage());
+    }
+  }
+
+  @Test
+  public void testStorageInvalid1() {
+    try {
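+      // the column group lacks the enclosing "[...]", so parsing should
+      // fail at the very first token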
+      String strStorage = "m1#{k1}";
+      Partition p = new Partition(schema.toString(), strStorage);
+      CGSchema[] cgschemas = p.getCGSchemas();
+      CGSchema cgs1 = cgschemas[0];
+      System.out.println(cgs1);
+    } catch (Exception e) {
+      String errMsg = e.getMessage();
+      String str = "Encountered \" <IDENTIFIER> \"m1 \"\" at line 1, column 1.";
+      System.out.println(errMsg);
+      System.out.println(str);
+      Assert.assertTrue(errMsg.startsWith(str));
+    }
+  }
+
+  @Test
+  public void testStorageInvalid2() {
+    try {
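+      // stray identifiers ("abc", "xyz") after the column group specs
+      // are not part of the storage grammar and should be rejected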
+      String strStorage = "[m1#{k1}] abc; [m1#{k2}, f3] xyz";
+      Partition p = new Partition(schema.toString(), strStorage);
+      CGSchema[] cgschemas = p.getCGSchemas();
+      CGSchema cgs1 = cgschemas[0];
+      System.out.println(cgs1);
+    } catch (Exception e) {
+      String errMsg = e.getMessage();
+      String str = "Encountered \" <IDENTIFIER> \"abc \"\" at line 1, column 11.";
+      System.out.println(errMsg);
+      System.out.println(str);
+      Assert.assertTrue(errMsg.startsWith(str));
+    }
+  }
+
+  @Test
+  public void testStorageInvalid3() {
+    try {
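+      // "{#k1}" reverses the key-projection syntax (it must be "#{k1}"),
+      // so the spec should fail to parse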
+      String strStorage = "[m1{#k1}{#k2}]";
+      Partition p = new Partition(schema.toString(), strStorage);
+      CGSchema[] cgschemas = p.getCGSchemas();
+      CGSchema cgs1 = cgschemas[0];
+      System.out.println(cgs1);
+    } catch (Exception e) {
+      String errMsg = e.getMessage();
+      String str = "Encountered \" \"[\" \"[ \"\" at line 1, column 1.";
+      System.out.println(errMsg);
+      System.out.println(str);
+      Assert.assertTrue(errMsg.startsWith(str));
+    }
+  }
+}
\ No newline at end of file

Added: hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/types/TestStorageMisc1.java
URL: http://svn.apache.org/viewvc/hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/types/TestStorageMisc1.java?rev=803312&view=auto
==============================================================================
--- hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/types/TestStorageMisc1.java (added)
+++ hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/types/TestStorageMisc1.java Tue Aug 11 22:27:44 2009
@@ -0,0 +1,160 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.zebra.types;
+
+import java.io.StringReader;
+import java.util.Iterator;
+import java.util.Map;
+import java.util.HashSet;
+import junit.framework.Assert;
+
+import org.apache.hadoop.zebra.types.CGSchema;
+import org.apache.hadoop.zebra.types.ColumnType;
+import org.apache.hadoop.zebra.types.ParseException;
+import org.apache.hadoop.zebra.types.Partition;
+import org.apache.hadoop.zebra.types.Schema;
+import org.apache.hadoop.zebra.types.TableSchemaParser;
+import org.apache.hadoop.zebra.types.Schema.ColumnSchema;
+import org.junit.Before;
+import org.junit.Test;
+
+public class TestStorageMisc1 {
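+  // Table schema under test: a record nesting another record plus an
+  // untyped map (map values default to bytes, as testSchema verifies).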
+  String strSch = "r:record(r:record(f1:int, f2:int), f2:map)";
+  TableSchemaParser parser;
+  Schema schema;
+
+  @Before
+  public void init() throws ParseException {
+    parser = new TableSchemaParser(new StringReader(strSch));
+    schema = parser.RecordSchema(null);
+  }
+
+  @Test
+  public void testSchema() throws ParseException {
+    System.out.println(schema);
+
+    // test 1st level schema;
+    ColumnSchema f1 = schema.getColumn(0);
+    Assert.assertEquals("r", f1.name);
+    Assert.assertEquals(ColumnType.RECORD, f1.type);
+
+    // test 2nd level schema;
+    Schema f1Schema = f1.schema;
+    ColumnSchema f11 = f1Schema.getColumn(0);
+    Assert.assertEquals("r", f11.name);
+    Assert.assertEquals(ColumnType.RECORD, f11.type);
+    ColumnSchema f12 = f1Schema.getColumn(1);
+    Assert.assertEquals("f2", f12.name);
+    Assert.assertEquals(ColumnType.MAP, f12.type);
+
+    // test 3rd level schema;
+    Schema f11Schema = f11.schema;
+    ColumnSchema f111 = f11Schema.getColumn(0);
+    Assert.assertEquals("f1", f111.name);
+    Assert.assertEquals(ColumnType.INT, f111.type);
+    ColumnSchema f112 = f11Schema.getColumn(1);
+    Assert.assertEquals("f2", f112.name);
+    Assert.assertEquals(ColumnType.INT, f112.type);
+
+    Schema f12Schema = f12.schema;
+    ColumnSchema f121 = f12Schema.getColumn(0);
+    // Assert.assertEquals("", f121.name);
+    Assert.assertEquals(ColumnType.BYTES, f121.type);
+  }
+
+  @Test
+  public void testStorageValid1() {
+    try {
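+      // Each column group carries its own COMPRESS BY / SERIALIZE BY
+      // options; the unassigned remainder of r.f2 forms a third,
+      // default group.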
+      String strStorage = "[r.r.f1,r.f2#{k1}] COMPRESS BY gzip ; [r.r.f2, r.f2#{k2}] COMPRESS BY lzo2 SERIALIZE BY avro";
+      Partition p = new Partition(schema.toString(), strStorage);
+      CGSchema[] cgschemas = p.getCGSchemas();
+
+      // 3 column groups;
+      int size = cgschemas.length;
+      Assert.assertEquals(size, 3);
+      System.out.println("********** Column Groups **********");
+      for (int i = 0; i < cgschemas.length; i++) {
+        System.out.println(cgschemas[i]);
+        System.out.println("--------------------------------");
+      }
+      CGSchema cgs1 = cgschemas[0];
+      CGSchema cgs2 = cgschemas[1];
+      CGSchema cgs3 = cgschemas[2];
+
+      ColumnSchema f11 = cgs1.getSchema().getColumn(0);
+      Assert.assertEquals("r.r.f1", f11.name);
+      Assert.assertEquals(ColumnType.INT, f11.type);
+      ColumnSchema f12 = cgs1.getSchema().getColumn(1);
+      Assert.assertEquals("r.f2", f12.name);
+      Assert.assertEquals(ColumnType.MAP, f12.type);
+
+      ColumnSchema f21 = cgs2.getSchema().getColumn(0);
+      Assert.assertEquals("r.r.f2", f21.name);
+      Assert.assertEquals(ColumnType.INT, f21.type);
+      ColumnSchema f22 = cgs2.getSchema().getColumn(1);
+      Assert.assertEquals("r.f2", f22.name);
+      Assert.assertEquals(ColumnType.MAP, f22.type);
+
+      ColumnSchema f31 = cgs3.getSchema().getColumn(0);
+      Assert.assertEquals("r.f2", f31.name);
+      Assert.assertEquals(ColumnType.MAP, f31.type);
+
+      System.out.println("*********** Column Map **********");
+      Map<String, HashSet<Partition.PartitionInfo.ColumnMappingEntry>> colmap = p
+          .getPartitionInfo().getColMap();
+      Assert.assertEquals(colmap.size(), 3);
+      Iterator<Map.Entry<String, HashSet<Partition.PartitionInfo.ColumnMappingEntry>>> it = colmap
+          .entrySet().iterator();
+      for (int i = 0; i < colmap.size(); i++) {
+        Map.Entry<String, HashSet<Partition.PartitionInfo.ColumnMappingEntry>> entry = (Map.Entry<String, HashSet<Partition.PartitionInfo.ColumnMappingEntry>>) it
+            .next();
+        String name = entry.getKey();
+        HashSet<Partition.PartitionInfo.ColumnMappingEntry> hs = entry
+            .getValue();
+        Iterator<Partition.PartitionInfo.ColumnMappingEntry> it1 = hs
+            .iterator();
+        for (int j = 0; j < hs.size(); j++) {
+          Partition.PartitionInfo.ColumnMappingEntry cme = (Partition.PartitionInfo.ColumnMappingEntry) it1
+              .next();
+          System.out.println("[Column = " + name + " CG = " + cme.getCGIndex()
+              + "." + cme.getFieldIndex() + "]");
+          /*
+           * if (i == 0 && j == 0) { Assert.assertEquals(name, "r.r.f1");
+           * Assert.assertEquals(cme.getCGIndex(), 0);
+           * Assert.assertEquals(cme.getFieldIndex(), 0); } else if (i == 1 && j
+           * == 0) { Assert.assertEquals(name, "r.r.f2");
+           * Assert.assertEquals(cme.getCGIndex(), 1);
+           * Assert.assertEquals(cme.getFieldIndex(), 0); } else if (i == 2 && j
+           * == 0) { Assert.assertEquals(name, "r.f2");
+           * Assert.assertEquals(cme.getCGIndex(), 1);
+           * Assert.assertEquals(cme.getFieldIndex(), 1); } else if (i == 2 && j
+           * == 1) { Assert.assertEquals(name, "r.f2");
+           * Assert.assertEquals(cme.getCGIndex(), 2);
+           * Assert.assertEquals(cme.getFieldIndex(), 0); } else if (i == 2 && j
+           * == 2) { Assert.assertEquals(name, "r.f2");
+           * Assert.assertEquals(cme.getCGIndex(), 0);
+           * Assert.assertEquals(cme.getFieldIndex(), 1); }
+           */
+        }
+      }
+    } catch (Exception e) {
+      Assert.fail("unexpected exception: " + e.getMessage());
+    }
+  }
+}
\ No newline at end of file

Added: hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/types/TestStorageMisc2.java
URL: http://svn.apache.org/viewvc/hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/types/TestStorageMisc2.java?rev=803312&view=auto
==============================================================================
--- hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/types/TestStorageMisc2.java (added)
+++ hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/types/TestStorageMisc2.java Tue Aug 11 22:27:44 2009
@@ -0,0 +1,226 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.zebra.types;
+
+import java.io.StringReader;
+import java.util.Iterator;
+import java.util.Map;
+import java.util.HashSet;
+import junit.framework.Assert;
+
+import org.apache.hadoop.zebra.types.CGSchema;
+import org.apache.hadoop.zebra.types.ColumnType;
+import org.apache.hadoop.zebra.types.ParseException;
+import org.apache.hadoop.zebra.types.Partition;
+import org.apache.hadoop.zebra.types.Schema;
+import org.apache.hadoop.zebra.types.TableSchemaParser;
+import org.apache.hadoop.zebra.types.Schema.ColumnSchema;
+import org.junit.Before;
+import org.junit.Test;
+
+public class TestStorageMisc2 {
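+  // Table schema under test: a collection of records nested two levels
+  // deep, ending in an untyped map, alongside a map of int.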
+  String strSch = "c:collection(r:record(r:record(f1:int, f2:int), f2:map)), m1:map(int)";
+  TableSchemaParser parser;
+  Schema schema;
+
+  @Before
+  public void init() throws ParseException {
+    parser = new TableSchemaParser(new StringReader(strSch));
+    schema = parser.RecordSchema(null);
+  }
+
+  @Test
+  public void testSchema() throws ParseException {
+    System.out.println(schema);
+
+    // test 1st level schema;
+    ColumnSchema f1 = schema.getColumn(0);
+    Assert.assertEquals("c", f1.name);
+    Assert.assertEquals(ColumnType.COLLECTION, f1.type);
+    ColumnSchema f2 = schema.getColumn(1);
+    Assert.assertEquals("m1", f2.name);
+    Assert.assertEquals(ColumnType.MAP, f2.type);
+
+    // test 2nd level schema;
+    Schema f1Schema = f1.schema;
+    ColumnSchema f11 = f1Schema.getColumn(0);
+    Assert.assertEquals("r", f11.name);
+    Assert.assertEquals(ColumnType.RECORD, f11.type);
+
+    Schema f2Schema = f2.schema;
+    ColumnSchema f21 = f2Schema.getColumn(0);
+    // Assert.assertEquals("", f21.name);
+    Assert.assertEquals(ColumnType.INT, f21.type);
+
+    // test 3rd level schema;
+    Schema f11Schema = f11.schema;
+    ColumnSchema f111 = f11Schema.getColumn(0);
+    Assert.assertEquals("r", f111.name);
+    Assert.assertEquals(ColumnType.RECORD, f111.type);
+    ColumnSchema f112 = f11Schema.getColumn(1);
+    Assert.assertEquals("f2", f112.name);
+    Assert.assertEquals(ColumnType.MAP, f112.type);
+
+    // test 4th level schema;
+    Schema f111Schema = f111.schema;
+    ColumnSchema f1111 = f111Schema.getColumn(0);
+    Assert.assertEquals("f1", f1111.name);
+    Assert.assertEquals(ColumnType.INT, f1111.type);
+    ColumnSchema f1112 = f111Schema.getColumn(1);
+    Assert.assertEquals("f2", f1112.name);
+    Assert.assertEquals(ColumnType.INT, f1112.type);
+
+    Schema f112Schema = f112.schema;
+    ColumnSchema f1121 = f112Schema.getColumn(0);
+    // Assert.assertEquals("", f1121.name);
+    Assert.assertEquals(ColumnType.BYTES, f1121.type);
+  }
+
+  @Test
+  public void testStorageValid1() {
+    try {
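+      // Whole columns with per-group options: gzip compression for the
+      // collection's group, an avro serializer for the map's group; all
+      // columns are covered, so only two groups result.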
+      String strStorage = "[c] compress by gzip; [m1] serialize by avro";
+      Partition p = new Partition(schema.toString(), strStorage);
+      CGSchema[] cgschemas = p.getCGSchemas();
+
+      // 2 column groups;
+      int size = cgschemas.length;
+      Assert.assertEquals(size, 2);
+      System.out.println("********** Column Groups **********");
+      for (int i = 0; i < cgschemas.length; i++) {
+        System.out.println(cgschemas[i]);
+        System.out.println("--------------------------------");
+      }
+      CGSchema cgs1 = cgschemas[0];
+      CGSchema cgs2 = cgschemas[1];
+
+      ColumnSchema f11 = cgs1.getSchema().getColumn(0);
+      Assert.assertEquals("c", f11.name);
+      Assert.assertEquals(ColumnType.COLLECTION, f11.type);
+
+      ColumnSchema f21 = cgs2.getSchema().getColumn(0);
+      Assert.assertEquals("m1", f21.name);
+      Assert.assertEquals(ColumnType.MAP, f21.type);
+
+      Assert.assertEquals(cgs1.getCompressor(), "gzip");
+      Assert.assertEquals(cgs2.getSerializer(), "avro");
+
+      System.out.println("*********** Column Map **********");
+      Map<String, HashSet<Partition.PartitionInfo.ColumnMappingEntry>> colmap = p
+          .getPartitionInfo().getColMap();
+      Assert.assertEquals(colmap.size(), 2);
+      Iterator<Map.Entry<String, HashSet<Partition.PartitionInfo.ColumnMappingEntry>>> it = colmap
+          .entrySet().iterator();
+      for (int i = 0; i < colmap.size(); i++) {
+        Map.Entry<String, HashSet<Partition.PartitionInfo.ColumnMappingEntry>> entry = (Map.Entry<String, HashSet<Partition.PartitionInfo.ColumnMappingEntry>>) it
+            .next();
+        String name = entry.getKey();
+        HashSet<Partition.PartitionInfo.ColumnMappingEntry> hs = entry
+            .getValue();
+        Iterator<Partition.PartitionInfo.ColumnMappingEntry> it1 = hs
+            .iterator();
+        for (int j = 0; j < hs.size(); j++) {
+          Partition.PartitionInfo.ColumnMappingEntry cme = (Partition.PartitionInfo.ColumnMappingEntry) it1
+              .next();
+          System.out.println("[Column = " + name + " CG = " + cme.getCGIndex()
+              + "." + cme.getFieldIndex() + "]");
+          if (i == 0 && j == 0) {
+            Assert.assertEquals(name, "c");
+            Assert.assertEquals(cme.getCGIndex(), 0);
+            Assert.assertEquals(cme.getFieldIndex(), 0);
+          } else if (i == 1 && j == 0) {
+            Assert.assertEquals(name, "m1");
+            Assert.assertEquals(cme.getCGIndex(), 1);
+            Assert.assertEquals(cme.getFieldIndex(), 0);
+          }
+        }
+      }
+    } catch (Exception e) {
+      Assert.fail("unexpected exception: " + e.getMessage());
+    }
+  }
+
+  @Test
+  public void testStorageValid2() {
+    try {
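+      // Projecting only key k1 of m1 leaves the rest of m1 unassigned,
+      // so a third, default column group appears, unlike
+      // testStorageValid1 above.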
+      String strStorage = "[c] compress by gzip; [m1#{k1}] serialize by avro";
+      Partition p = new Partition(schema.toString(), strStorage);
+      CGSchema[] cgschemas = p.getCGSchemas();
+
+      // 3 column groups;
+      int size = cgschemas.length;
+      Assert.assertEquals(size, 3);
+      System.out.println("********** Column Groups **********");
+      for (int i = 0; i < cgschemas.length; i++) {
+        System.out.println(cgschemas[i]);
+        System.out.println("--------------------------------");
+      }
+      CGSchema cgs1 = cgschemas[0];
+      CGSchema cgs2 = cgschemas[1];
+      CGSchema cgs3 = cgschemas[2];
+
+      ColumnSchema f11 = cgs1.getSchema().getColumn(0);
+      Assert.assertEquals("c", f11.name);
+      Assert.assertEquals(ColumnType.COLLECTION, f11.type);
+
+      ColumnSchema f21 = cgs2.getSchema().getColumn(0);
+      Assert.assertEquals("m1", f21.name);
+      Assert.assertEquals(ColumnType.MAP, f21.type);
+
+      ColumnSchema f31 = cgs3.getSchema().getColumn(0);
+      Assert.assertEquals("m1", f31.name);
+      Assert.assertEquals(ColumnType.MAP, f31.type);
+
+      System.out.println("*********** Column Map **********");
+      Map<String, HashSet<Partition.PartitionInfo.ColumnMappingEntry>> colmap = p
+          .getPartitionInfo().getColMap();
+      Assert.assertEquals(colmap.size(), 2);
+      Iterator<Map.Entry<String, HashSet<Partition.PartitionInfo.ColumnMappingEntry>>> it = colmap
+          .entrySet().iterator();
+      for (int i = 0; i < colmap.size(); i++) {
+        Map.Entry<String, HashSet<Partition.PartitionInfo.ColumnMappingEntry>> entry = (Map.Entry<String, HashSet<Partition.PartitionInfo.ColumnMappingEntry>>) it
+            .next();
+        String name = entry.getKey();
+        HashSet<Partition.PartitionInfo.ColumnMappingEntry> hs = entry
+            .getValue();
+        Iterator<Partition.PartitionInfo.ColumnMappingEntry> it1 = hs
+            .iterator();
+        for (int j = 0; j < hs.size(); j++) {
+          Partition.PartitionInfo.ColumnMappingEntry cme = (Partition.PartitionInfo.ColumnMappingEntry) it1
+              .next();
+          System.out.println("[Column = " + name + " CG = " + cme.getCGIndex()
+              + "." + cme.getFieldIndex() + "]");
+          /*
+           * if (i == 0 && j == 0) { Assert.assertEquals(name, "c");
+           * Assert.assertEquals(cme.getCGIndex(), 0);
+           * Assert.assertEquals(cme.getFieldIndex(), 0); } else if (i == 0 && j
+           * == 1) { Assert.assertEquals(name, "m1");
+           * Assert.assertEquals(cme.getCGIndex(), 1);
+           * Assert.assertEquals(cme.getFieldIndex(), 0); } else if (i == 1 && j
+           * == 0) { Assert.assertEquals(name, "m1");
+           * Assert.assertEquals(cme.getCGIndex(), 2);
+           * Assert.assertEquals(cme.getFieldIndex(), 0); }
+           */
+        }
+      }
+    } catch (Exception e) {
+      Assert.fail("unexpected exception: " + e.getMessage());
+    }
+  }
+}
\ No newline at end of file