Posted to commits@pig.apache.org by ga...@apache.org on 2009/08/12 00:27:47 UTC

svn commit: r803312 [11/16] - in /hadoop/pig/trunk: ./ contrib/zebra/ contrib/zebra/docs/ contrib/zebra/src/ contrib/zebra/src/java/ contrib/zebra/src/java/org/ contrib/zebra/src/java/org/apache/ contrib/zebra/src/java/org/apache/hadoop/ contrib/zebra/...

Added: hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/io/TestNegative.java
URL: http://svn.apache.org/viewvc/hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/io/TestNegative.java?rev=803312&view=auto
==============================================================================
--- hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/io/TestNegative.java (added)
+++ hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/io/TestNegative.java Tue Aug 11 22:27:44 2009
@@ -0,0 +1,619 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.hadoop.zebra.io;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.Map;
+import java.util.Random;
+import java.util.StringTokenizer;
+
+import junit.framework.Assert;
+import junit.framework.TestCase;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.LocalFileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.RawLocalFileSystem;
+import org.apache.hadoop.io.BytesWritable;
+import org.apache.hadoop.zebra.io.BasicTable;
+import org.apache.hadoop.zebra.io.TableInserter;
+import org.apache.hadoop.zebra.io.TableScanner;
+import org.apache.hadoop.zebra.io.BasicTable.Reader.RangeSplit;
+import org.apache.hadoop.zebra.types.ParseException;
+import org.apache.hadoop.zebra.types.Projection;
+import org.apache.hadoop.zebra.types.Schema;
+import org.apache.hadoop.zebra.types.TypesUtils;
+import org.apache.pig.backend.executionengine.ExecException;
+import org.apache.pig.data.DataBag;
+import org.apache.pig.data.DataByteArray;
+import org.apache.pig.data.Tuple;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+/**
+ * 
+ * Negative test cases for BasicTable schema and storage specifications.
+ * 
+ */
+public class TestNegative {
+  private static Configuration conf;
+  private static Path path;
+  private static FileSystem fs;
+
+  @BeforeClass
+  public static void setUpOnce() throws IOException {
+
+  }
+
+  @AfterClass
+  public static void tearDownOnce() throws IOException {
+    BasicTable.drop(path, conf);
+  }
+
+  // Negative test case. For record split, we should not try to store the same
+  // record field in different column groups.
+  @Test
+  public void testWriteRecord5() throws IOException, ParseException {
+    String STR_SCHEMA = "r1:record(f1:int, f2:long), r2:record(r3:record(f3:float, f4))";
+    String STR_STORAGE = "[r1.f1]; [r2.r3]; [r1.f2, r2.r3.f3]";
+    conf = new Configuration();
+    conf.setInt("table.output.tfile.minBlock.size", 64 * 1024);
+    conf.setInt("table.input.split.minSize", 64 * 1024);
+    conf.set("table.output.tfile.compression", "none");
+
+    RawLocalFileSystem rawLFS = new RawLocalFileSystem();
+    fs = new LocalFileSystem(rawLFS);
+    path = new Path(fs.getWorkingDirectory(), this.getClass().getSimpleName());
+    fs = path.getFileSystem(conf);
+    // drop any previous tables
+    BasicTable.drop(path, conf);
+    // Build Table and column groups
+    BasicTable.Writer writer = null;
+    try {
+      writer = new BasicTable.Writer(path, STR_SCHEMA, STR_STORAGE, false, conf);
+      Assert.fail("Should throw exception");
+    } catch (Exception e) {
+      System.out.println(e);
+    }
+  }
+
+  // Negative test case: map storage syntax is wrong.
+  @Test
+  public void testWriteMap1() throws IOException, ParseException {
+    String STR_SCHEMA = " m2:map(map(map(string)))";
+    String STR_STORAGE = "[m2#{k}#{j}]";
+
+    conf = new Configuration();
+    conf.setInt("table.output.tfile.minBlock.size", 64 * 1024);
+    conf.setInt("table.input.split.minSize", 64 * 1024);
+    conf.set("table.output.tfile.compression", "none");
+
+    RawLocalFileSystem rawLFS = new RawLocalFileSystem();
+    fs = new LocalFileSystem(rawLFS);
+    path = new Path(fs.getWorkingDirectory(), this.getClass().getSimpleName());
+    fs = path.getFileSystem(conf);
+    // drop any previous tables
+    BasicTable.drop(path, conf);
+    // Build Table and column groups
+    BasicTable.Writer writer = null;
+    try {
+      writer = new BasicTable.Writer(path, STR_SCHEMA, STR_STORAGE, false, conf);
+      Assert.fail("Should throw exception");
+    } catch (Exception e) {
+      System.out.println(e);
+    }
+  }
+
+  // Negative test case: map storage syntax is wrong ("." instead of "#" before the key set).
+  @Test
+  public void testWriteMap2() throws IOException, ParseException {
+    String STR_SCHEMA = " m2:map(map(map(string)))";
+    String STR_STORAGE = "[m2.{k}]";
+
+    conf = new Configuration();
+    conf.setInt("table.output.tfile.minBlock.size", 64 * 1024);
+    conf.setInt("table.input.split.minSize", 64 * 1024);
+    conf.set("table.output.tfile.compression", "none");
+
+    RawLocalFileSystem rawLFS = new RawLocalFileSystem();
+    fs = new LocalFileSystem(rawLFS);
+    path = new Path(fs.getWorkingDirectory(), this.getClass().getSimpleName());
+    fs = path.getFileSystem(conf);
+    // drop any previous tables
+    BasicTable.drop(path, conf);
+    // Build Table and column groups
+    BasicTable.Writer writer = null;
+    try {
+      writer = new BasicTable.Writer(path, STR_SCHEMA, STR_STORAGE, false, conf);
+      Assert.fail("Should throw exception");
+    } catch (Exception e) {
+      System.out.println(e);
+    }
+  }
+
+  // Negative test case: map storage syntax is wrong (missing "#" before the key set).
+  @Test
+  public void testWriteMap3() throws IOException, ParseException {
+    String STR_SCHEMA = " m2:map(map(map(string)))";
+    String STR_STORAGE = "[m2{k}]";
+
+    conf = new Configuration();
+    conf.setInt("table.output.tfile.minBlock.size", 64 * 1024);
+    conf.setInt("table.input.split.minSize", 64 * 1024);
+    conf.set("table.output.tfile.compression", "none");
+
+    RawLocalFileSystem rawLFS = new RawLocalFileSystem();
+    fs = new LocalFileSystem(rawLFS);
+    path = new Path(fs.getWorkingDirectory(), this.getClass().getSimpleName());
+    fs = path.getFileSystem(conf);
+    // drop any previous tables
+    BasicTable.drop(path, conf);
+    // Build Table and column groups
+    BasicTable.Writer writer = null;
+    try {
+      writer = new BasicTable.Writer(path, STR_SCHEMA, STR_STORAGE, false, conf);
+      Assert.fail("Should throw exception");
+    } catch (Exception e) {
+      System.out.println(e);
+    }
+  }
+
+  // Negative test case: map storage syntax is wrong.
+  @Test
+  public void testWriteMap4() throws IOException, ParseException {
+    String STR_SCHEMA = " m2:map(map(map(string)))";
+    String STR_STORAGE = "[m2#{k}";
+
+    conf = new Configuration();
+    conf.setInt("table.output.tfile.minBlock.size", 64 * 1024);
+    conf.setInt("table.input.split.minSize", 64 * 1024);
+    conf.set("table.output.tfile.compression", "none");
+
+    RawLocalFileSystem rawLFS = new RawLocalFileSystem();
+    fs = new LocalFileSystem(rawLFS);
+    path = new Path(fs.getWorkingDirectory(), this.getClass().getSimpleName());
+    fs = path.getFileSystem(conf);
+    // drop any previous tables
+    BasicTable.drop(path, conf);
+    // Build Table and column groups
+    BasicTable.Writer writer = null;
+    try {
+      writer = new BasicTable.Writer(path, STR_SCHEMA, STR_STORAGE, false, conf);
+      Assert.fail("Should throw exception");
+    } catch (Exception e) {
+      System.out.println(e);
+    }
+  }
+
+  // Negative test case: map schema syntax is wrong.
+  @Test
+  public void testWriteMap5() throws IOException, ParseException {
+    String STR_SCHEMA = " m2:map(map(map(string,string,string)))";
+    String STR_STORAGE = "[m2#{k}]";
+
+    conf = new Configuration();
+    conf.setInt("table.output.tfile.minBlock.size", 64 * 1024);
+    conf.setInt("table.input.split.minSize", 64 * 1024);
+    conf.set("table.output.tfile.compression", "none");
+
+    RawLocalFileSystem rawLFS = new RawLocalFileSystem();
+    fs = new LocalFileSystem(rawLFS);
+    path = new Path(fs.getWorkingDirectory(), this.getClass().getSimpleName());
+    fs = path.getFileSystem(conf);
+    // drop any previous tables
+    BasicTable.drop(path, conf);
+    // Build Table and column groups
+    BasicTable.Writer writer = null;
+    try {
+      writer = new BasicTable.Writer(path, STR_SCHEMA, STR_STORAGE, false, conf);
+      Assert.fail("Should throw exception");
+    } catch (Exception e) {
+      System.out.println(e);
+    }
+  }
+
+  // Negative test case: map storage syntax is wrong (chained, unbraced keys).
+  @Test
+  public void testWriteMap6() throws IOException, ParseException {
+    String STR_SCHEMA = " m2:map(map(map(string)))";
+    String STR_STORAGE = "[m2#k#k1]";
+
+    conf = new Configuration();
+    conf.setInt("table.output.tfile.minBlock.size", 64 * 1024);
+    conf.setInt("table.input.split.minSize", 64 * 1024);
+    conf.set("table.output.tfile.compression", "none");
+
+    RawLocalFileSystem rawLFS = new RawLocalFileSystem();
+    fs = new LocalFileSystem(rawLFS);
+    path = new Path(fs.getWorkingDirectory(), this.getClass().getSimpleName());
+    fs = path.getFileSystem(conf);
+    // drop any previous tables
+    BasicTable.drop(path, conf);
+    // Build Table and column groups
+    BasicTable.Writer writer = null;
+    try {
+      writer = new BasicTable.Writer(path, STR_SCHEMA, STR_STORAGE, false, conf);
+      Assert.fail("Should throw exception");
+    } catch (Exception e) {
+      System.out.println(e);
+    }
+  }
+
+  // Negative test case: map storage syntax is wrong (key not wrapped in braces).
+  @Test
+  public void testWriteMap7() throws IOException, ParseException {
+    String STR_SCHEMA = " m2:map(map(map(string)))";
+    String STR_STORAGE = "[m2#k]";
+
+    conf = new Configuration();
+    conf.setInt("table.output.tfile.minBlock.size", 64 * 1024);
+    conf.setInt("table.input.split.minSize", 64 * 1024);
+    conf.set("table.output.tfile.compression", "none");
+
+    RawLocalFileSystem rawLFS = new RawLocalFileSystem();
+    fs = new LocalFileSystem(rawLFS);
+    path = new Path(fs.getWorkingDirectory(), this.getClass().getSimpleName());
+    fs = path.getFileSystem(conf);
+    // drop any previous tables
+    BasicTable.drop(path, conf);
+    // Build Table and column groups
+    BasicTable.Writer writer = null;
+    try {
+      writer = new BasicTable.Writer(path, STR_SCHEMA, STR_STORAGE, false, conf);
+      Assert.fail("Should throw exception");
+    } catch (Exception e) {
+      System.out.println(e);
+    }
+  }
+
+  // Negative test case: should not accept the same field name twice.
+  @Test
+  public void testWriteRecord1() throws IOException, ParseException {
+    String STR_SCHEMA = " r1:record(f1,f2), r1:record(f1,f2)";
+    String STR_STORAGE = "[r1]";
+
+    conf = new Configuration();
+    conf.setInt("table.output.tfile.minBlock.size", 64 * 1024);
+    conf.setInt("table.input.split.minSize", 64 * 1024);
+    conf.set("table.output.tfile.compression", "none");
+
+    RawLocalFileSystem rawLFS = new RawLocalFileSystem();
+    fs = new LocalFileSystem(rawLFS);
+    path = new Path(fs.getWorkingDirectory(), this.getClass().getSimpleName());
+    fs = path.getFileSystem(conf);
+    // drop any previous tables
+    BasicTable.drop(path, conf);
+    // Build Table and column groups
+    BasicTable.Writer writer = null;
+    try {
+      writer = new BasicTable.Writer(path, STR_SCHEMA, STR_STORAGE, false, conf);
+      Assert.fail("Should throw exception");
+    } catch (Exception e) {
+      System.out.println(e);
+    }
+  }
+
+  // Negative test case: duplicate column storage (r1 listed twice in one group).
+  @Test
+  public void testWriteRecord2() throws IOException, ParseException {
+    String STR_SCHEMA = " r1:record(f1,f2), r2:record(f1,f2)";
+    String STR_STORAGE = "[r1,r1]";
+
+    conf = new Configuration();
+    conf.setInt("table.output.tfile.minBlock.size", 64 * 1024);
+    conf.setInt("table.input.split.minSize", 64 * 1024);
+    conf.set("table.output.tfile.compression", "none");
+
+    RawLocalFileSystem rawLFS = new RawLocalFileSystem();
+    fs = new LocalFileSystem(rawLFS);
+    path = new Path(fs.getWorkingDirectory(), this.getClass().getSimpleName());
+    fs = path.getFileSystem(conf);
+    // drop any previous tables
+    BasicTable.drop(path, conf);
+    // Build Table and column groups
+    BasicTable.Writer writer = null;
+    try {
+      writer = new BasicTable.Writer(path, STR_SCHEMA, STR_STORAGE, false, conf);
+      Assert.fail("Should throw exception");
+    } catch (Exception e) {
+      System.out.println(e);
+    }
+  }
+
+  // Negative test case: duplicate column storage (r1.f1 and r2 appear in two groups).
+  @Test
+  public void testWriteRecord3() throws IOException, ParseException {
+    String STR_SCHEMA = " r1:record(f1,f2), r2:record(f1,f2)";
+    String STR_STORAGE = "[r1.f1, r2]; [r1.f1,r2]";
+
+    conf = new Configuration();
+    conf.setInt("table.output.tfile.minBlock.size", 64 * 1024);
+    conf.setInt("table.input.split.minSize", 64 * 1024);
+    conf.set("table.output.tfile.compression", "none");
+
+    RawLocalFileSystem rawLFS = new RawLocalFileSystem();
+    fs = new LocalFileSystem(rawLFS);
+    path = new Path(fs.getWorkingDirectory(), this.getClass().getSimpleName());
+    fs = path.getFileSystem(conf);
+    // drop any previous tables
+    BasicTable.drop(path, conf);
+    // Build Table and column groups
+    BasicTable.Writer writer = null;
+    try {
+      writer = new BasicTable.Writer(path, STR_SCHEMA, STR_STORAGE, false, conf);
+      Assert.fail("Should throw exception");
+    } catch (Exception e) {
+      System.out.println(e);
+    }
+  }
+
+  // Negative test case: duplicate column storage (r1.f1 appears in two groups).
+  @Test
+  public void testWriteRecord4() throws IOException, ParseException {
+    String STR_SCHEMA = " r1:record(f1,f2), r2:record(f1,f2)";
+    String STR_STORAGE = "[r1.f1]; [r1.f1,r2]";
+
+    conf = new Configuration();
+    conf.setInt("table.output.tfile.minBlock.size", 64 * 1024);
+    conf.setInt("table.input.split.minSize", 64 * 1024);
+    conf.set("table.output.tfile.compression", "none");
+
+    RawLocalFileSystem rawLFS = new RawLocalFileSystem();
+    fs = new LocalFileSystem(rawLFS);
+    path = new Path(fs.getWorkingDirectory(), this.getClass().getSimpleName());
+    fs = path.getFileSystem(conf);
+    // drop any previous tables
+    BasicTable.drop(path, conf);
+    // Build Table and column groups
+    BasicTable.Writer writer = null;
+    try {
+      writer = new BasicTable.Writer(path, STR_SCHEMA, STR_STORAGE, false, conf);
+      Assert.fail("Should throw exception");
+    } catch (Exception e) {
+      System.out.println(e);
+    }
+  }
+
+  // Negative test case: null storage specification.
+  @Test
+  public void testWriteNull5() throws IOException, ParseException {
+    String STR_SCHEMA = " r1:record(f1,f2), r2:record(f1,f2)";
+
+    String STR_STORAGE = null;
+
+    conf = new Configuration();
+    conf.setInt("table.output.tfile.minBlock.size", 64 * 1024);
+    conf.setInt("table.input.split.minSize", 64 * 1024);
+    conf.set("table.output.tfile.compression", "none");
+
+    RawLocalFileSystem rawLFS = new RawLocalFileSystem();
+    fs = new LocalFileSystem(rawLFS);
+    path = new Path(fs.getWorkingDirectory(), this.getClass().getSimpleName());
+    fs = path.getFileSystem(conf);
+    // drop any previous tables
+    BasicTable.drop(path, conf);
+    // Build Table and column groups
+    BasicTable.Writer writer = null;
+    try {
+      writer = new BasicTable.Writer(path, STR_SCHEMA, STR_STORAGE, false, conf);
+      System.out.println("HERE HERE");
+      Assert.fail("Should throw exception");
+    } catch (Exception e) {
+      e.printStackTrace();
+    }
+  }
+
+  // Positive test case: an empty storage spec is accepted (presumably one default column group).
+  @Test
+  public void testWriteEmpty6() throws IOException, ParseException {
+    String STR_SCHEMA = "f1:int, f2:string";
+
+    String STR_STORAGE = "";
+
+    conf = new Configuration();
+    conf.setInt("table.output.tfile.minBlock.size", 64 * 1024);
+    conf.setInt("table.input.split.minSize", 64 * 1024);
+    conf.set("table.output.tfile.compression", "none");
+
+    RawLocalFileSystem rawLFS = new RawLocalFileSystem();
+    fs = new LocalFileSystem(rawLFS);
+    path = new Path(fs.getWorkingDirectory(), this.getClass().getSimpleName());
+    fs = path.getFileSystem(conf);
+    // drop any previous tables
+    BasicTable.drop(path, conf);
+    // Build Table and column groups
+    BasicTable.Writer writer = null;
+    try {
+      writer = new BasicTable.Writer(path, STR_SCHEMA, STR_STORAGE, false, conf);
+    } catch (Exception e) {
+      e.printStackTrace();
+      Assert.fail("Should Not throw exception");
+    }
+    writer.finish();
+    Schema schema = writer.getSchema();
+    Tuple tuple = TypesUtils.createTuple(schema);
+    BasicTable.Writer writer1 = new BasicTable.Writer(path, conf);
+    int part = 0;
+    TableInserter inserter = writer1.getInserter("part" + part, true);
+    TypesUtils.resetTuple(tuple);
+
+    // insert data in row 1
+    int row = 0;
+    tuple.set(0, 1);
+    tuple.set(1, "hello1");
+
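+    // Row keys follow the pattern "k" + (part+1) + (row+1), so this row's key is "k11".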
+    inserter.insert(new BytesWritable(String.format("k%d%d", part + 1, row + 1)
+        .getBytes()), tuple);
+
+    // row 2
+    row++;
+    TypesUtils.resetTuple(tuple);
+
+    tuple.set(0, 2);
+    tuple.set(1, "hello2");
+
+    inserter.insert(new BytesWritable(String.format("k%d%d", part + 1, row + 1)
+        .getBytes()), tuple);
+
+    // finish building table, closing out the inserter, writer, writer1
+    inserter.close();
+    writer1.finish();
+    writer.close();
+
+    String projection3 = new String("f1,f2");
+    BasicTable.Reader reader = new BasicTable.Reader(path, conf);
+    reader.setProjection(projection3);
+    List<RangeSplit> splits = reader.rangeSplit(1);
+    TableScanner scanner = reader.getScanner(splits.get(0), true);
+    BytesWritable key = new BytesWritable();
+    Tuple RowValue = TypesUtils.createTuple(scanner.getSchema());
+
+    scanner.getKey(key);
+    Assert.assertEquals(key, new BytesWritable("k11".getBytes()));
+    scanner.getValue(RowValue);
+
+    Assert.assertEquals(1, RowValue.get(0));
+    Assert.assertEquals("hello1", RowValue.get(1));
+
+    scanner.advance();
+
+    scanner.getKey(key);
+    Assert.assertEquals(key, new BytesWritable("k12".getBytes()));
+    scanner.getValue(RowValue);
+    Assert.assertEquals(2, RowValue.get(0));
+    Assert.assertEquals("hello2", RowValue.get(1));
+
+    reader.close();
+
+  }
+
+  // Positive test case: with map storage "[m2#{k1}];[m2]", [m2] will store everything besides k1.
+  @Test
+  public void testMapWrite8() throws IOException, ParseException {
+    String STR_SCHEMA = " m2:map(map(map(string))), m1:map(int)";
+    String STR_STORAGE = "[m2#{k1}];[m2]";
+
+    conf = new Configuration();
+    conf.setInt("table.output.tfile.minBlock.size", 64 * 1024);
+    conf.setInt("table.input.split.minSize", 64 * 1024);
+    conf.set("table.output.tfile.compression", "none");
+
+    RawLocalFileSystem rawLFS = new RawLocalFileSystem();
+    fs = new LocalFileSystem(rawLFS);
+    path = new Path(fs.getWorkingDirectory(), this.getClass().getSimpleName());
+    fs = path.getFileSystem(conf);
+    // drop any previous tables
+    BasicTable.drop(path, conf);
+    // Build Table and column groups
+    BasicTable.Writer writer = null;
+    try {
+      writer = new BasicTable.Writer(path, STR_SCHEMA, STR_STORAGE, false, conf);
+    } catch (Exception e) {
+      System.out.println(e);
+      Assert.fail("Should throw exception");
+    }
+  }
+
+  // Negative test case: duplicate map storage.
+  @Test
+  public void testMapWrite9() throws IOException, ParseException {
+    String STR_SCHEMA = " m2:map(map(map(string))), m1:map(int)";
+    String STR_STORAGE = "[m2#{k1}], [m2#{k1}]";
+
+    conf = new Configuration();
+    conf.setInt("table.output.tfile.minBlock.size", 64 * 1024);
+    conf.setInt("table.input.split.minSize", 64 * 1024);
+    conf.set("table.output.tfile.compression", "none");
+
+    RawLocalFileSystem rawLFS = new RawLocalFileSystem();
+    fs = new LocalFileSystem(rawLFS);
+    path = new Path(fs.getWorkingDirectory(), this.getClass().getSimpleName());
+    fs = path.getFileSystem(conf);
+    // drop any previous tables
+    BasicTable.drop(path, conf);
+    // Build Table and column groups
+    BasicTable.Writer writer = null;
+    try {
+      writer = new BasicTable.Writer(path, STR_SCHEMA, STR_STORAGE, false, conf);
+      Assert.fail("Should throw exception");
+    } catch (Exception e) {
+      System.out.println(e);
+    }
+  }
+
+  // Positive test case: disjoint map key storage. Disabled; TODO: why does it fail?
+
+  public void xtestMapWrite10() throws IOException, ParseException {
+    String STR_SCHEMA = " m2:map(map(map(string))), m1:map(int)";
+    String STR_STORAGE = "[m2#{k1}]; [m2#{k2}]";
+
+    Configuration conf = new Configuration();
+    conf.setInt("table.output.tfile.minBlock.size", 64 * 1024);
+    conf.setInt("table.input.split.minSize", 64 * 1024);
+    conf.set("table.output.tfile.compression", "none");
+
+    RawLocalFileSystem rawLFS = new RawLocalFileSystem();
+    FileSystem fs = new LocalFileSystem(rawLFS);
+    Path path = new Path(fs.getWorkingDirectory(), this.getClass()
+        .getSimpleName());
+    fs = path.getFileSystem(conf);
+    // drop any previous tables
+    BasicTable.drop(path, conf);
+    // Build Table and column groups
+    BasicTable.Writer writer = null;
+    try {
+      writer = new BasicTable.Writer(path, STR_SCHEMA, STR_STORAGE, false, conf);
+    } catch (Exception e) {
+      e.printStackTrace();
+      Assert.fail("Should Not throw exception");
+    }
+
+  }
+
+  // Negative test case: schema declares the same field name with different types.
+  @Test
+  public void testColumnField5() throws IOException, ParseException {
+    String STR_SCHEMA = "r1:int, r1:float";
+    String STR_STORAGE = "[r1]";
+
+    conf = new Configuration();
+    conf.setInt("table.output.tfile.minBlock.size", 64 * 1024);
+    conf.setInt("table.input.split.minSize", 64 * 1024);
+    conf.set("table.output.tfile.compression", "none");
+
+    RawLocalFileSystem rawLFS = new RawLocalFileSystem();
+    fs = new LocalFileSystem(rawLFS);
+    path = new Path(fs.getWorkingDirectory(), this.getClass().getSimpleName());
+    fs = path.getFileSystem(conf);
+    // drop any previous tables
+    BasicTable.drop(path, conf);
+    // Build Table and column groups
+    BasicTable.Writer writer = null;
+    try {
+      writer = new BasicTable.Writer(path, STR_SCHEMA, STR_STORAGE, false, conf);
+      Assert.fail("Should throw exception");
+    } catch (Exception e) {
+      System.out.println(e);
+    }
+  }
+
+}
\ No newline at end of file

Added: hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/io/TestRecord.java
URL: http://svn.apache.org/viewvc/hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/io/TestRecord.java?rev=803312&view=auto
==============================================================================
--- hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/io/TestRecord.java (added)
+++ hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/io/TestRecord.java Tue Aug 11 22:27:44 2009
@@ -0,0 +1,415 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.hadoop.zebra.io;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.Map;
+import java.util.Random;
+import java.util.StringTokenizer;
+
+import junit.framework.Assert;
+import junit.framework.TestCase;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.LocalFileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.RawLocalFileSystem;
+import org.apache.hadoop.io.BytesWritable;
+import org.apache.hadoop.zebra.io.BasicTable;
+import org.apache.hadoop.zebra.io.TableInserter;
+import org.apache.hadoop.zebra.io.TableScanner;
+import org.apache.hadoop.zebra.io.BasicTable.Reader.RangeSplit;
+import org.apache.hadoop.zebra.types.ParseException;
+import org.apache.hadoop.zebra.types.Projection;
+import org.apache.hadoop.zebra.types.Schema;
+import org.apache.hadoop.zebra.types.TypesUtils;
+import org.apache.pig.backend.executionengine.ExecException;
+import org.apache.pig.data.DataBag;
+import org.apache.pig.data.DataByteArray;
+import org.apache.pig.data.Tuple;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+/**
+ * 
+ * Test projections on complicated column types.
+ * 
+ */
+public class TestRecord {
+  private static Configuration conf;
+  private static Path path;
+  private static FileSystem fs;
+  static int count;
+  final static String STR_SCHEMA = "r1:record(f1:int, f2:long), r2:record(f5, r3:record(f3:float, f4))";
+  final static String STR_STORAGE = "[r1.f1]; [r2.r3.f4]; [r1.f2, r2.r3.f3]";
+
+  @BeforeClass
+  public static void setUpOnce() throws IOException {
+    conf = new Configuration();
+    conf.setInt("table.output.tfile.minBlock.size", 64 * 1024);
+    conf.setInt("table.input.split.minSize", 64 * 1024);
+    conf.set("table.output.tfile.compression", "none");
+
+    RawLocalFileSystem rawLFS = new RawLocalFileSystem();
+    fs = new LocalFileSystem(rawLFS);
+    path = new Path(fs.getWorkingDirectory(), "TestRecord");
+    fs = path.getFileSystem(conf);
+    // drop any previous tables
+    BasicTable.drop(path, conf);
+    // Build Table and column groups
+    BasicTable.Writer writer = new BasicTable.Writer(path, STR_SCHEMA,
+        STR_STORAGE, false, conf);
+    writer.finish();
+    Schema schema = writer.getSchema();
+    Tuple tuple = TypesUtils.createTuple(schema);
+    BasicTable.Writer writer1 = new BasicTable.Writer(path, conf);
+    int part = 0;
+    TableInserter inserter = writer1.getInserter("part" + part, true);
+    TypesUtils.resetTuple(tuple);
+
+    Tuple tupRecord1;
+    try {
+      tupRecord1 = TypesUtils.createTuple(schema.getColumnSchema("r1")
+          .getSchema());
+    } catch (ParseException e) {
+      e.printStackTrace();
+      throw new IOException(e);
+    }
+
+    Tuple tupRecord2;
+    try {
+      tupRecord2 = TypesUtils.createTuple(schema.getColumnSchema("r2")
+          .getSchema());
+    } catch (ParseException e) {
+      e.printStackTrace();
+      throw new IOException(e);
+    }
+
+    Tuple tupRecord3;
+    try {
+      tupRecord3 = TypesUtils.createTuple(new Schema("f3:float, f4"));
+    } catch (ParseException e) {
+      e.printStackTrace();
+      throw new IOException(e);
+    }
+
+    // insert data in row 1
+    int row = 0;
+    // r1:record(f1:int, f2:long)
+    tupRecord1.set(0, 1);
+    tupRecord1.set(1, 1001L);
+    tuple.set(0, tupRecord1);
+
+    // r2:record(f5, r3:record(f3:float, f4))
+    tupRecord2.set(0, "f5 row1 byte array");
+
+    tupRecord3.set(0, 1.3);
+    tupRecord3.set(1, new DataByteArray("r3 row1 byte array"));
+    tupRecord2.set(1, tupRecord3);
+    tuple.set(1, tupRecord2);
+    inserter.insert(new BytesWritable(String.format("k%d%d", part + 1, row + 1)
+        .getBytes()), tuple);
+
+    // row 2
+    row++;
+    TypesUtils.resetTuple(tuple);
+    TypesUtils.resetTuple(tupRecord1);
+    TypesUtils.resetTuple(tupRecord2);
+    TypesUtils.resetTuple(tupRecord3);
+    // r1:record(f1:int, f2:long)
+    tupRecord1.set(0, 2);
+    tupRecord1.set(1, 1002L);
+    tuple.set(0, tupRecord1);
+
+    // r2:record(f5, r3:record(f3:float, f4))
+    tupRecord2.set(0, "f5 row2 byte array");
+    tupRecord3.set(0, 2.3);
+    tupRecord3.set(1, new DataByteArray("r3 row2 byte array"));
+    tupRecord2.set(1, tupRecord3);
+    tuple.set(1, tupRecord2);
+
+    inserter.insert(new BytesWritable(String.format("k%d%d", part + 1, row + 1)
+        .getBytes()), tuple);
+
+    // finish building table, closing out the inserter, writer, writer1
+    inserter.close();
+    writer1.finish();
+    writer.close();
+  }
+
+  @AfterClass
+  public static void tearDownOnce() throws IOException {
+    BasicTable.drop(path, conf);
+  }
+
+  @Test
+  // Test read with simple projection 0, not at the record-of-record level.
+  public void testRead1() throws IOException, ParseException {
+    String projection0 = new String("r1.f1, r1.f2");
+    BasicTable.Reader reader = new BasicTable.Reader(path, conf);
+    reader.setProjection(projection0);
+    List<RangeSplit> splits = reader.rangeSplit(1);
+    TableScanner scanner = reader.getScanner(splits.get(0), true);
+    BytesWritable key = new BytesWritable();
+    Tuple RowValue = TypesUtils.createTuple(scanner.getSchema());
+
+    scanner.getKey(key);
+    Assert.assertEquals(key, new BytesWritable("k11".getBytes()));
+    scanner.getValue(RowValue);
+    System.out.println("read 1:" + RowValue.toString());
+    // read 1:(1,1001L)
+    Assert.assertEquals(1, RowValue.get(0));
+
+    scanner.advance();
+    scanner.getKey(key);
+    Assert.assertEquals(key, new BytesWritable("k12".getBytes()));
+    scanner.getValue(RowValue);
+    Assert.assertEquals(2, RowValue.get(0));
+    reader.close();
+
+  }
+
+  /*
+   * String STR_SCHEMA =
+   * "r1:record(f1:int, f2:long), r2:record(f5, r3:record(f3:float, f4))";
+   * String STR_STORAGE = "[r1.f1]; [r2.r3.f4]; [r1.f2, r2.r3.f3]";
+   */
+  @Test
+  // Test read at the record-of-record level.
+  public void testRead2() throws IOException, ParseException {
+    String projection1 = new String("r2.r3.f4, r2.r3.f3");
+    BasicTable.Reader reader = new BasicTable.Reader(path, conf);
+    reader.setProjection(projection1);
+    List<RangeSplit> splits = reader.rangeSplit(1);
+    TableScanner scanner = reader.getScanner(splits.get(0), true);
+    BytesWritable key = new BytesWritable();
+    Tuple RowValue = TypesUtils.createTuple(scanner.getSchema());
+
+    scanner.getKey(key);
+    Assert.assertEquals(key, new BytesWritable("k11".getBytes()));
+    scanner.getValue(RowValue);
+    System.out.println("read 2:" + RowValue.toString());
+    // read 2:(r3 row1 byte array,1.3)
+    Assert.assertEquals("r3 row1 byte array", RowValue.get(0).toString());
+    Assert.assertEquals(1.3, RowValue.get(1));
+
+    scanner.advance();
+    scanner.getKey(key);
+    Assert.assertEquals(key, new BytesWritable("k12".getBytes()));
+    scanner.getValue(RowValue);
+    Assert.assertEquals("r3 row2 byte array", RowValue.get(0).toString());
+    Assert.assertEquals(2.3, RowValue.get(1));
+    reader.close();
+  }
+
+  /*
+   * String STR_SCHEMA =
+   * "r1:record(f1:int, f2:long), r2:record(f5, r3:record(f3:float, f4))";
+   * String STR_STORAGE = "[r1.f1]; [r2.r3.f4]; [r1.f2, r2.r3.f3]";
+   */
+  @Test
+  // test stitch, r1.f1, r2.r3.f3
+  public void testRead3() throws IOException, ParseException {
+    String projection2 = new String("r1.f1, r2.r3.f3");
+    BasicTable.Reader reader = new BasicTable.Reader(path, conf);
+    reader.setProjection(projection2);
+    List<RangeSplit> splits = reader.rangeSplit(1);
+    TableScanner scanner = reader.getScanner(splits.get(0), true);
+    BytesWritable key = new BytesWritable();
+    Tuple RowValue = TypesUtils.createTuple(scanner.getSchema());
+
+    scanner.getKey(key);
+    Assert.assertEquals(key, new BytesWritable("k11".getBytes()));
+    scanner.getValue(RowValue);
+    System.out.println("read 3:" + RowValue.toString());
+    // read 3:(1,1.3)
+    Assert.assertEquals(1, RowValue.get(0));
+    Assert.assertEquals(1.3, RowValue.get(1));
+
+    scanner.advance();
+
+    scanner.getKey(key);
+    Assert.assertEquals(key, new BytesWritable("k12".getBytes()));
+    scanner.getValue(RowValue);
+    Assert.assertEquals(2, RowValue.get(0));
+    Assert.assertEquals(2.3, RowValue.get(1));
+    reader.close();
+  }
+
+  @Test
+  public void testRead4() throws IOException, ParseException {
+    String projection3 = new String("r1, r2");
+    BasicTable.Reader reader = new BasicTable.Reader(path, conf);
+    reader.setProjection(projection3);
+    List<RangeSplit> splits = reader.rangeSplit(1);
+    TableScanner scanner = reader.getScanner(splits.get(0), true);
+    BytesWritable key = new BytesWritable();
+    Tuple RowValue = TypesUtils.createTuple(scanner.getSchema());
+
+    scanner.getKey(key);
+    Assert.assertEquals(key, new BytesWritable("k11".getBytes()));
+    scanner.getValue(RowValue);
+    System.out.println("read 4:" + RowValue.toString());
+    // read 4:((1,1001L),(f5 row1 byte array,(1.3,r3 row1 byte array)))
+
+    Assert.assertEquals(1, ((Tuple) RowValue.get(0)).get(0));
+    Assert.assertEquals(1001L, ((Tuple) RowValue.get(0)).get(1));
+    Assert.assertEquals("f5 row1 byte array", ((Tuple) RowValue.get(1)).get(0)
+        .toString());
+
+    Assert.assertEquals(1.3, ((Tuple) ((Tuple) RowValue.get(1)).get(1)).get(0));
+    Assert.assertEquals("r3 row1 byte array",
+        ((Tuple) ((Tuple) RowValue.get(1)).get(1)).get(1).toString());
+
+    scanner.advance();
+
+    scanner.getKey(key);
+    Assert.assertEquals(key, new BytesWritable("k12".getBytes()));
+    scanner.getValue(RowValue);
+    Assert.assertEquals(2, ((Tuple) RowValue.get(0)).get(0));
+    Assert.assertEquals(1002L, ((Tuple) RowValue.get(0)).get(1));
+    Assert.assertEquals("f5 row2 byte array", ((Tuple) RowValue.get(1)).get(0)
+        .toString());
+
+    Assert.assertEquals(2.3, ((Tuple) ((Tuple) RowValue.get(1)).get(1)).get(0));
+    Assert.assertEquals("r3 row2 byte array",
+        ((Tuple) ((Tuple) RowValue.get(1)).get(1)).get(1).toString());
+
+    reader.close();
+  }
+
+  @Test
+  // test stitch, r2.r3.f3
+  public void testRead5() throws IOException, ParseException {
+    String projection3 = new String("r2.r3.f3");
+    BasicTable.Reader reader = new BasicTable.Reader(path, conf);
+    reader.setProjection(projection3);
+    List<RangeSplit> splits = reader.rangeSplit(1);
+    TableScanner scanner = reader.getScanner(splits.get(0), true);
+    BytesWritable key = new BytesWritable();
+    Tuple RowValue = TypesUtils.createTuple(scanner.getSchema());
+
+    scanner.getKey(key);
+    Assert.assertEquals(key, new BytesWritable("k11".getBytes()));
+    scanner.getValue(RowValue);
+    System.out.println("read 5:" + RowValue.toString());
+    // read 5:(1.3)
+    Assert.assertEquals(1.3, RowValue.get(0));
+
+    scanner.advance();
+
+    scanner.getKey(key);
+    Assert.assertEquals(key, new BytesWritable("k12".getBytes()));
+    scanner.getValue(RowValue);
+    Assert.assertEquals(2.3, RowValue.get(0));
+
+    reader.close();
+  }
+
+  /*
+   * String STR_SCHEMA =
+   * "r1:record(f1:int, f2:long), r2:record(f5, r3:record(f3:float, f4))";
+   * String STR_STORAGE = "[r1.f1]; [r2.r3.f4]; [r1.f2, r2.r3.f3]";
+   */
+  @Test
+  // test stitch, (r1.f2, r1.f1)
+  public void testRead6() throws IOException, ParseException {
+    String projection3 = new String("r1.f2,r1.f1");
+    BasicTable.Reader reader = new BasicTable.Reader(path, conf);
+    reader.setProjection(projection3);
+    List<RangeSplit> splits = reader.rangeSplit(1);
+    TableScanner scanner = reader.getScanner(splits.get(0), true);
+    BytesWritable key = new BytesWritable();
+    Tuple RowValue = TypesUtils.createTuple(scanner.getSchema());
+
+    scanner.getKey(key);
+    Assert.assertEquals(key, new BytesWritable("k11".getBytes()));
+    scanner.getValue(RowValue);
+    System.out.println("read 6:" + RowValue.toString());
+    // read 6:(1001L,1)
+
+    Assert.assertEquals(1001L, RowValue.get(0));
+    Assert.assertEquals(1, RowValue.get(1));
+    try {
+      RowValue.get(2);
+      Assert.fail("Should throw index out of bound exception ");
+    } catch (Exception e) {
+      e.printStackTrace();
+    }
+
+    scanner.advance();
+
+    scanner.getKey(key);
+    Assert.assertEquals(key, new BytesWritable("k12".getBytes()));
+    scanner.getValue(RowValue);
+    Assert.assertEquals(1002L, RowValue.get(0));
+    Assert.assertEquals(2, RowValue.get(1));
+    try {
+      RowValue.get(2);
+      Assert.fail("Should throw index out of bound exception ");
+    } catch (Exception e) {
+      e.printStackTrace();
+    }
+    reader.close();
+  }
+
+  @Test
+  // test projection, negative, non-existent column name.
+  public void testReadNegative1() throws IOException, ParseException {
+    String projection4 = new String("r3");
+    BasicTable.Reader reader = new BasicTable.Reader(path, conf);
+    reader.setProjection(projection4);
+    List<RangeSplit> splits = reader.rangeSplit(1);
+    TableScanner scanner = reader.getScanner(splits.get(0), true);
+    BytesWritable key = new BytesWritable();
+    Tuple RowValue = TypesUtils.createTuple(scanner.getSchema());
+    scanner.getKey(key);
+    Assert.assertEquals(key, new BytesWritable("k11".getBytes()));
+    scanner.getValue(RowValue);
+    Assert.assertEquals(null, RowValue.get(0));
+    reader.close();
+
+  }
+
+  @Test
+  // test projection, negative, non-existent column name.
+  public void testReadNegative2() throws IOException, ParseException {
+    String projection4 = new String("NO");
+    BasicTable.Reader reader = new BasicTable.Reader(path, conf);
+    reader.setProjection(projection4);
+    List<RangeSplit> splits = reader.rangeSplit(1);
+    TableScanner scanner = reader.getScanner(splits.get(0), true);
+    BytesWritable key = new BytesWritable();
+    Tuple RowValue = TypesUtils.createTuple(scanner.getSchema());
+    scanner.getKey(key);
+    Assert.assertEquals(key, new BytesWritable("k11".getBytes()));
+    scanner.getValue(RowValue);
+    Assert.assertEquals(null, RowValue.get(0));
+    reader.close();
+
+  }
+}
\ No newline at end of file

Added: hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/io/TestRecord2Map.java
URL: http://svn.apache.org/viewvc/hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/io/TestRecord2Map.java?rev=803312&view=auto
==============================================================================
--- hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/io/TestRecord2Map.java (added)
+++ hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/io/TestRecord2Map.java Tue Aug 11 22:27:44 2009
@@ -0,0 +1,411 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.hadoop.zebra.io;
+
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+import java.io.PrintWriter;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Random;
+import java.util.StringTokenizer;
+
+import junit.framework.Assert;
+import junit.framework.TestCase;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.LocalFileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.RawLocalFileSystem;
+import org.apache.hadoop.io.BytesWritable;
+import org.apache.hadoop.zebra.io.BasicTable;
+import org.apache.hadoop.zebra.io.TableInserter;
+import org.apache.hadoop.zebra.io.TableScanner;
+import org.apache.hadoop.zebra.io.BasicTable.Reader.RangeSplit;
+import org.apache.hadoop.zebra.types.ParseException;
+import org.apache.hadoop.zebra.types.Projection;
+import org.apache.hadoop.zebra.types.Schema;
+import org.apache.hadoop.zebra.types.TypesUtils;
+import org.apache.pig.backend.executionengine.ExecException;
+import org.apache.pig.data.DataBag;
+import org.apache.pig.data.DataByteArray;
+import org.apache.pig.data.Tuple;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+/**
+ * 
+ * Test projections on complicated column types.
+ * 
+ */
+public class TestRecord2Map {
+  private static Configuration conf;
+  private static Path path;
+  private static FileSystem fs;
+  static int count;
+  static String STR_SCHEMA = "r1:record(f1:int, f2:map(long)), r2:record(r3:record(f3:float, f4:map(int)))";
+  static String STR_STORAGE = "[r1.f1, r1.f2#{x|y}]; [r2.r3.f4]; [r1.f2, r2.r3.f3, r2.r3.f4#{a|c}]";
+
+  @BeforeClass
+  public static void setUpOnce() throws IOException {
+    conf = new Configuration();
+    conf.setInt("table.output.tfile.minBlock.size", 64 * 1024);
+    conf.setInt("table.input.split.minSize", 64 * 1024);
+    conf.set("table.output.tfile.compression", "none");
+
+    RawLocalFileSystem rawLFS = new RawLocalFileSystem();
+    fs = new LocalFileSystem(rawLFS);
+    path = new Path(fs.getWorkingDirectory(), "TestRecord2Map");
+    fs = path.getFileSystem(conf);
+    // drop any previous tables
+    BasicTable.drop(path, conf);
+    // Build Table and column groups
+    BasicTable.Writer writer = new BasicTable.Writer(path, STR_SCHEMA,
+        STR_STORAGE, false, conf);
+    writer.finish();
+    Schema schema = writer.getSchema();
+    Tuple tuple = TypesUtils.createTuple(schema);
+    BasicTable.Writer writer1 = new BasicTable.Writer(path, conf);
+    int part = 0;
+    TableInserter inserter = writer1.getInserter("part" + part, true);
+    TypesUtils.resetTuple(tuple);
+
+    Tuple tupRecord1;
+    try {
+      tupRecord1 = TypesUtils.createTuple(schema.getColumnSchema("r1")
+          .getSchema());
+    } catch (ParseException e) {
+      e.printStackTrace();
+      throw new IOException(e);
+    }
+
+    Tuple tupRecord2;
+    try {
+      tupRecord2 = TypesUtils.createTuple(schema.getColumnSchema("r2")
+          .getSchema());
+    } catch (ParseException e) {
+      e.printStackTrace();
+      throw new IOException(e);
+    }
+
+    Tuple tupRecord3;
+    try {
+      tupRecord3 = TypesUtils.createTuple(new Schema("f3:float, f4:map(int)"));
+    } catch (ParseException e) {
+      e.printStackTrace();
+      throw new IOException(e);
+    }
+    HashMap<String, Long> map1 = new HashMap<String, Long>();
+    // insert data in row 1
+    int row = 0;
+    // r1:record(f1:int, f2:map(long))
+    tupRecord1.set(0, 1);
+    map1.put("x", 1001L);
+    tupRecord1.set(1, map1);
+    tuple.set(0, tupRecord1);
+
+    // r2:record(r3:record(f3:float, f4:map(int)))
+    HashMap<String, Integer> map = new HashMap<String, Integer>();
+    tupRecord2.set(0, tupRecord3);
+    tupRecord3.set(0, 1.3);
+    map.put("a", 1);
+    map.put("b", 2);
+    map.put("c", 3);
+    tupRecord3.set(1, map);
+    tuple.set(1, tupRecord2);
+    inserter.insert(new BytesWritable(String.format("k%d%d", part + 1, row + 1)
+        .getBytes()), tuple);
+
+    // row 2
+    map1.clear();
+    row++;
+    TypesUtils.resetTuple(tuple);
+    TypesUtils.resetTuple(tupRecord1);
+    TypesUtils.resetTuple(tupRecord2);
+    TypesUtils.resetTuple(tupRecord3);
+    // r1:record(f1:int, f2:map(long))
+    tupRecord1.set(0, 2);
+    map1.put("y", 1002L);
+    tupRecord1.set(1, map1);
+    tuple.set(0, tupRecord1);
+
+    // r2:record(r3:record(f3:float, f4:map(int)))
+    tupRecord2.set(0, tupRecord3);
+    map.clear();
+    map.put("x", 11);
+    map.put("y", 12);
+    map.put("c", 13);
+
+    tupRecord3.set(0, 2.3);
+    tupRecord3.set(1, map);
+    tuple.set(1, tupRecord2);
+
+    inserter.insert(new BytesWritable(String.format("k%d%d", part + 1, row + 1)
+        .getBytes()), tuple);
+
+    // finish building table, closing out the inserter, writer, writer1
+    inserter.close();
+    writer1.finish();
+    writer.close();
+  }
+
+  @AfterClass
+  public static void tearDownOnce() throws IOException {
+    BasicTable.drop(path, conf);
+  }
+
+  @Test
+  public void testReadSimpleRecord() throws IOException, ParseException {
+    // Simple projection read, not at the record-of-record level.
+    String projection0 = new String("r1.f1, r1.f2");
+    BasicTable.Reader reader = new BasicTable.Reader(path, conf);
+    reader.setProjection(projection0);
+    List<RangeSplit> splits = reader.rangeSplit(1);
+    TableScanner scanner = reader.getScanner(splits.get(0), true);
+    BytesWritable key = new BytesWritable();
+    Tuple RowValue = TypesUtils.createTuple(scanner.getSchema());
+
+    scanner.getKey(key);
+    Assert.assertEquals(key, new BytesWritable("k11".getBytes()));
+    scanner.getValue(RowValue);
+    Assert.assertEquals(1, RowValue.get(0));
+    Long expected = 1001L;
+    Assert.assertEquals(expected, ((Map<String, Long>) RowValue.get(1))
+        .get("x"));
+    Assert.assertEquals(null, ((Map<String, Long>) RowValue.get(1)).get("y"));
+    Assert.assertEquals(null, ((Map<String, Long>) RowValue.get(1)).get("a"));
+    Assert.assertEquals(null, ((Map<String, Long>) RowValue.get(1)).get("z"));
+
+    scanner.advance();
+    scanner.getKey(key);
+    Assert.assertEquals(key, new BytesWritable("k12".getBytes()));
+    scanner.getValue(RowValue);
+    Assert.assertEquals(2, RowValue.get(0));
+    expected = 1002L;
+    Assert.assertEquals(expected, ((Map<String, Long>) RowValue.get(1))
+        .get("y"));
+    Assert.assertEquals(null, ((Map<String, Long>) RowValue.get(1)).get("x"));
+    Assert.assertEquals(null, ((Map<String, Long>) RowValue.get(1)).get("a"));
+    Assert.assertEquals(null, ((Map<String, Long>) RowValue.get(1)).get("z"));
+
+    reader.close();
+
+  }
+
+  /*
+   * String STR_SCHEMA =
+   * "r1:record(f1:int, f2:long), r2:record(r3:record(f3:float, map(int))";
+   * String STR_STORAGE =
+   * "[r1.f1]; [r2.r3.f4]; [r1.f2, r2.r3.f3, r2.r3.f4#{a|c}]";
+   */
+  @Test
+  public void testReadRecordOfMap1() throws IOException, ParseException {
+    String projection1 = new String("r2.r3.f4#{a|b}, r2.r3.f3");
+    BasicTable.Reader reader = new BasicTable.Reader(path, conf);
+    reader.setProjection(projection1);
+    List<RangeSplit> splits = reader.rangeSplit(1);
+    TableScanner scanner = reader.getScanner(splits.get(0), true);
+    BytesWritable key = new BytesWritable();
+    Tuple RowValue = TypesUtils.createTuple(scanner.getSchema());
+
+    scanner.getKey(key);
+    Assert.assertEquals(key, new BytesWritable("k11".getBytes()));
+    scanner.getValue(RowValue);
+    Assert.assertEquals(1, ((Map) (RowValue.get(0))).get("a"));
+    Assert.assertEquals(2, ((Map) RowValue.get(0)).get("b"));
+    Assert.assertEquals(null, ((Map) (RowValue.get(0))).get("x"));
+    Assert.assertEquals(null, ((Map) RowValue.get(0)).get("y"));
+    Assert.assertEquals(1.3, RowValue.get(1));
+
+    scanner.advance();
+    scanner.getKey(key);
+    Assert.assertEquals(key, new BytesWritable("k12".getBytes()));
+    scanner.getValue(RowValue);
+    Assert.assertEquals(null, ((Map) RowValue.get(0)).get("a"));
+    Assert.assertEquals(null, ((Map) RowValue.get(0)).get("b"));
+    Assert.assertEquals(null, ((Map) RowValue.get(0)).get("x"));
+    Assert.assertEquals(null, ((Map) RowValue.get(0)).get("y"));
+    Assert.assertEquals(null, ((Map) RowValue.get(0)).get("c"));
+    Assert.assertEquals(2.3, RowValue.get(1));
+    reader.close();
+  }
+
+  /*
+   * String STR_SCHEMA =
+   * "r1:record(f1:int, f2:long), r2:record(r3:record(f3:float, f4:map(int))";
+   * String STR_STORAGE =
+   * "[r1.f1]; [r2.r3.f4]; [r1.f2, r2.r3.f3, r2.r3.f4#{a|c}]";
+   */
+  @Test
+  // test stitch across the column groups defined in STR_STORAGE
+  public void testReadRecordOfRecord2() throws IOException, ParseException {
+    String projection2 = new String("r1.f1, r2.r3.f3");
+    BasicTable.Reader reader = new BasicTable.Reader(path, conf);
+    reader.setProjection(projection2);
+    List<RangeSplit> splits = reader.rangeSplit(1);
+    TableScanner scanner = reader.getScanner(splits.get(0), true);
+    BytesWritable key = new BytesWritable();
+    Tuple RowValue = TypesUtils.createTuple(scanner.getSchema());
+
+    scanner.getKey(key);
+    Assert.assertEquals(key, new BytesWritable("k11".getBytes()));
+    scanner.getValue(RowValue);
+    Assert.assertEquals(1, RowValue.get(0));
+    Assert.assertEquals(1.3, RowValue.get(1));
+    scanner.advance();
+
+    scanner.getKey(key);
+    Assert.assertEquals(key, new BytesWritable("k12".getBytes()));
+    scanner.getValue(RowValue);
+    Assert.assertEquals(2, RowValue.get(0));
+    Assert.assertEquals(2.3, RowValue.get(1));
+    reader.close();
+  }
+
+  /*
+   * String STR_SCHEMA =
+   * "r1:record(f1:int, f2:long), r2:record(r3:record(f3:float, f4:map(int)))" ;
+   * String STR_STORAGE =
+   * "[r1.f1]; [r2.r3.f4]; [r1.f2, r2.r3.f3, r2.r3.f4#{a|c}]";
+   */
+  // test stitch across the column groups defined in STR_STORAGE
+  public void testReadRecordOfRecord3() throws IOException, ParseException {
+    String projection3 = new String("r1, r2, r1.f2#{x|z}");
+    BasicTable.Reader reader = new BasicTable.Reader(path, conf);
+    reader.setProjection(projection3);
+    List<RangeSplit> splits = reader.rangeSplit(1);
+    TableScanner scanner = reader.getScanner(splits.get(0), true);
+    BytesWritable key = new BytesWritable();
+    Tuple RowValue = TypesUtils.createTuple(scanner.getSchema());
+
+    scanner.getKey(key);
+    Assert.assertEquals(key, new BytesWritable("k11".getBytes()));
+    scanner.getValue(RowValue);
+
+    Assert.assertEquals(1, ((Tuple) RowValue.get(0)).get(0));
+    Long expected = 1001L;
+    Assert.assertEquals(expected, ((Map<String, Long>) RowValue.get(2))
+        .get("x"));
+    Assert.assertEquals(null, ((Map<String, Long>) RowValue.get(2)).get("y"));
+    Assert.assertEquals(null, ((Map<String, Long>) RowValue.get(2)).get("z"));
+    Tuple r2 = (Tuple) RowValue.get(1);
+    Tuple r3 = (Tuple) r2.get(0);
+    Map<String, Integer> f4 = (Map<String, Integer>) r3.get(1);
+    Integer tmp = 1;
+    Assert.assertEquals(tmp, f4.get("a"));
+    tmp = 2;
+    Assert.assertEquals(tmp, f4.get("b"));
+    tmp = 3;
+    Assert.assertEquals(tmp, f4.get("c"));
+    Assert.assertEquals(1.3, ((Tuple) ((Tuple) RowValue.get(1)).get(0)).get(0));
+
+    scanner.advance();
+
+    scanner.getKey(key);
+    Assert.assertEquals(key, new BytesWritable("k12".getBytes()));
+    scanner.getValue(RowValue);
+    Assert.assertEquals(2, ((Tuple) RowValue.get(0)).get(0));
+    expected = 1002L;
+    Assert.assertEquals(null, ((Map<String, Long>) RowValue.get(2)).get("x"));
+    Assert.assertEquals(null, ((Map<String, Long>) RowValue.get(2)).get("y"));
+    Assert.assertEquals(null, ((Map<String, Long>) RowValue.get(2)).get("z"));
+    r2 = (Tuple) RowValue.get(1);
+    r3 = (Tuple) r2.get(0);
+    f4 = (Map<String, Integer>) r3.get(1);
+    tmp = 11;
+    Assert.assertEquals(tmp, f4.get("x"));
+    tmp = 12;
+    Assert.assertEquals(tmp, f4.get("y"));
+    tmp = 13;
+    Assert.assertEquals(tmp, f4.get("c"));
+    Assert.assertEquals(2.3, ((Tuple) ((Tuple) RowValue.get(1)).get(0)).get(0));
+
+    reader.close();
+  }
+
+  /*
+   * String STR_SCHEMA =
+   * "r1:record(f1:int, f2:long), r2:record(r3:record(f3:float, f4:map(int)))" ;
+   * String STR_STORAGE =
+   * "[r1.f1]; [r2.r3.f4]; [r1.f2, r2.r3.f3, r2.r3.f4#{a|c}]";
+   */
+  @Test
+  // test stitch across the column groups defined in STR_STORAGE
+  public void testReadRecordOfRecord4() throws IOException, ParseException {
+    String projection3 = new String("r2.r3.f4");
+    BasicTable.Reader reader = new BasicTable.Reader(path, conf);
+    reader.setProjection(projection3);
+    List<RangeSplit> splits = reader.rangeSplit(1);
+    TableScanner scanner = reader.getScanner(splits.get(0), true);
+    BytesWritable key = new BytesWritable();
+    Tuple RowValue = TypesUtils.createTuple(scanner.getSchema());
+
+    scanner.getKey(key);
+    Assert.assertEquals(key, new BytesWritable("k11".getBytes()));
+    scanner.getValue(RowValue);
+
+    Map<String, Integer> f4 = (Map<String, Integer>) RowValue.get(0);
+    Integer tmp = 1;
+    Assert.assertEquals(tmp, f4.get("a"));
+    tmp = 2;
+    Assert.assertEquals(tmp, f4.get("b"));
+    tmp = 3;
+    Assert.assertEquals(tmp, f4.get("c"));
+    Assert.assertEquals(null, f4.get("x"));
+
+    scanner.advance();
+
+    scanner.getKey(key);
+    Assert.assertEquals(key, new BytesWritable("k12".getBytes()));
+    scanner.getValue(RowValue);
+
+    f4 = (Map<String, Integer>) RowValue.get(0);
+    tmp = 11;
+    Assert.assertEquals(tmp, f4.get("x"));
+    tmp = 12;
+    Assert.assertEquals(tmp, f4.get("y"));
+    tmp = 13;
+    Assert.assertEquals(tmp, f4.get("c"));
+    Assert.assertEquals(null, f4.get("a"));
+    reader.close();
+  }
+
+  @Test
+  public void testReadNegative1() throws IOException, ParseException {
+    String projection4 = new String("r3");
+    BasicTable.Reader reader = new BasicTable.Reader(path, conf);
+    reader.setProjection(projection4);
+    List<RangeSplit> splits = reader.rangeSplit(1);
+    TableScanner scanner = reader.getScanner(splits.get(0), true);
+    BytesWritable key = new BytesWritable();
+    Tuple RowValue = TypesUtils.createTuple(scanner.getSchema());
+    scanner.getKey(key);
+    Assert.assertEquals(key, new BytesWritable("k11".getBytes()));
+    scanner.getValue(RowValue);
+    Assert.assertEquals(null, RowValue.get(0));
+    reader.close();
+
+  }
+}

Added: hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/io/TestRecord3Map.java
URL: http://svn.apache.org/viewvc/hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/io/TestRecord3Map.java?rev=803312&view=auto
==============================================================================
--- hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/io/TestRecord3Map.java (added)
+++ hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/io/TestRecord3Map.java Tue Aug 11 22:27:44 2009
@@ -0,0 +1,403 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.hadoop.zebra.io;
+
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+import java.io.PrintWriter;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Random;
+import java.util.StringTokenizer;
+
+import junit.framework.Assert;
+import junit.framework.TestCase;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.LocalFileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.RawLocalFileSystem;
+import org.apache.hadoop.io.BytesWritable;
+import org.apache.hadoop.zebra.io.BasicTable;
+import org.apache.hadoop.zebra.io.TableInserter;
+import org.apache.hadoop.zebra.io.TableScanner;
+import org.apache.hadoop.zebra.io.BasicTable.Reader.RangeSplit;
+import org.apache.hadoop.zebra.types.ParseException;
+import org.apache.hadoop.zebra.types.Projection;
+import org.apache.hadoop.zebra.types.Schema;
+import org.apache.hadoop.zebra.types.TypesUtils;
+import org.apache.pig.backend.executionengine.ExecException;
+import org.apache.pig.data.DataBag;
+import org.apache.pig.data.DataByteArray;
+import org.apache.pig.data.Tuple;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+/**
+ * 
+ * Test projections on complicated column types.
+ * 
+ */
+public class TestRecord3Map {
+  static private Configuration conf;
+  static private Path path;
+  static private FileSystem fs;
+  static String STR_SCHEMA = "r1:record(f1:int, f2:long), r2:record(r3:record(f3:map(float), f4:map(int)))";
+  static String STR_STORAGE = "[r1.f1, r1.f2]; [r2.r3.f4]; [r2.r3.f3#{x|y}, r2.r3.f4#{a|c}]";
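+  // Storage spec notes: each semicolon-separated "[...]" entry defines a
+  // column group; a keyed entry such as "r2.r3.f4#{a|c}" places only map
+  // keys a and c in that group (the bare "r2.r3.f4" group appears to hold
+  // the remaining keys, which the keyed-vs-full map reads below exercise).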
+
+  @BeforeClass
+  public static void setUpOnce() throws IOException {
+    conf = new Configuration();
+    conf.setInt("table.output.tfile.minBlock.size", 64 * 1024);
+    conf.setInt("table.input.split.minSize", 64 * 1024);
+    conf.set("table.output.tfile.compression", "none");
+
+    RawLocalFileSystem rawLFS = new RawLocalFileSystem();
+    fs = new LocalFileSystem(rawLFS);
+    path = new Path(fs.getWorkingDirectory(), "TestRecord3Map");
+    fs = path.getFileSystem(conf);
+    // drop any previous tables
+    BasicTable.drop(path, conf);
+    // Build Table and column groups
+    BasicTable.Writer writer = new BasicTable.Writer(path, STR_SCHEMA,
+        STR_STORAGE, false, conf);
+    writer.finish();
+    Schema schema = writer.getSchema();
+    Tuple tuple = TypesUtils.createTuple(schema);
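+    // Note the two-step write: this first Writer fixes the table's schema
+    // and storage layout, and a second Writer opened on the same path below
+    // hands out the per-part inserter used to load rows.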
+    BasicTable.Writer writer1 = new BasicTable.Writer(path, conf);
+    int part = 0;
+    TableInserter inserter = writer1.getInserter("part" + part, true);
+    TypesUtils.resetTuple(tuple);
+
+    Tuple tupRecord1;
+    try {
+      tupRecord1 = TypesUtils.createTuple(schema.getColumnSchema("r1")
+          .getSchema());
+    } catch (ParseException e) {
+      e.printStackTrace();
+      throw new IOException(e);
+    }
+
+    Tuple tupRecord2;
+    try {
+      tupRecord2 = TypesUtils.createTuple(schema.getColumnSchema("r2")
+          .getSchema());
+    } catch (ParseException e) {
+      e.printStackTrace();
+      throw new IOException(e);
+    }
+
+    Tuple tupRecord3;
+    try {
+      tupRecord3 = TypesUtils.createTuple(new Schema("f3:map(float), f4:map(int)"));
+    } catch (ParseException e) {
+      e.printStackTrace();
+      throw new IOException(e);
+    }
+    HashMap<String, Float> map1 = new HashMap<String, Float>();
+    // insert data in row 1
+    int row = 0;
+    // r1:record(f1:int, f2:long)
+    tupRecord1.set(0, 1);
+    tupRecord1.set(1, 1001L);
+    tuple.set(0, tupRecord1);
+
+    // r2:record(r3:record(f3:map(float), f4:map(int)))
+    HashMap<String, Integer> map = new HashMap<String, Integer>();
+    tupRecord2.set(0, tupRecord3);
+    map1.put("x", 1.3F);
+    tupRecord3.set(0, map1);
+    map.put("a", 1);
+    map.put("b", 2);
+    map.put("c", 3);
+    tupRecord3.set(1, map);
+    tuple.set(1, tupRecord2);
+    inserter.insert(new BytesWritable(String.format("k%d%d", part + 1, row + 1)
+        .getBytes()), tuple);
+
+    // row 2
+    map1.clear();
+    row++;
+    TypesUtils.resetTuple(tuple);
+    TypesUtils.resetTuple(tupRecord1);
+    TypesUtils.resetTuple(tupRecord2);
+    TypesUtils.resetTuple(tupRecord3);
+    // r1:record(f1:int, f2:long)
+    tupRecord1.set(0, 2);
+    tupRecord1.set(1, 1002L);
+    tuple.set(0, tupRecord1);
+
+    // r2:record(r3:record(f3:map(float), f4:map(int)))
+    tupRecord2.set(0, tupRecord3);
+    map.clear();
+    map.put("x", 11);
+    map.put("y", 12);
+    map.put("c", 13);
+
+    map1.put("y", 2.3F);
+    tupRecord3.set(0, map1);
+    tupRecord3.set(1, map);
+    tuple.set(1, tupRecord2);
+
+    inserter.insert(new BytesWritable(String.format("k%d%d", part + 1, row + 1)
+        .getBytes()), tuple);
+
+    // finish building table, closing out the inserter, writer, writer1
+    inserter.close();
+    writer1.finish();
+    writer.close();
+  }
+
+  @AfterClass
+  public static void tearDownOnce() throws IOException {
+    BasicTable.drop(path, conf);
+  }
+
+  @Test
+  public void testReadSimpleRecord() throws IOException, ParseException {
+    // Start reading with simple projection0; no record-of-record nesting involved. PASS
+    String projection0 = "r1.f1, r1.f2";
+    BasicTable.Reader reader = new BasicTable.Reader(path, conf);
+    reader.setProjection(projection0);
+    List<RangeSplit> splits = reader.rangeSplit(1);
+    TableScanner scanner = reader.getScanner(splits.get(0), true);
+    BytesWritable key = new BytesWritable();
+    Tuple RowValue = TypesUtils.createTuple(scanner.getSchema());
+
+    scanner.getKey(key);
+    Assert.assertEquals(key, new BytesWritable("k11".getBytes()));
+    scanner.getValue(RowValue);
+    Assert.assertEquals(1, RowValue.get(0));
+    Long expected = 1001L;
+    Assert.assertEquals(expected, RowValue.get(1));
+
+    scanner.advance();
+    scanner.getKey(key);
+    Assert.assertEquals(key, new BytesWritable("k12".getBytes()));
+    scanner.getValue(RowValue);
+    Assert.assertEquals(2, RowValue.get(0));
+    expected = 1002L;
+    Assert.assertEquals(expected, RowValue.get(1));
+
+    reader.close();
+
+  }
+
+  /*
+   * String STR_SCHEMA =
+   * "r1:record(f1:int, f2:long), r2:record(r3:record(f3:map(float), f4:map(int)))";
+   * String STR_STORAGE =
+   * "[r1.f1, r1.f2]; [r2.r3.f4]; [r2.r3.f3#{x|y}, r2.r3.f4#{a|c}]";
+   */
+  @Test
+  public void testReadRecordOfMap1() throws IOException, ParseException {
+    String projection1 = "r2.r3.f4#{a|b}, r2.r3.f3";
+    BasicTable.Reader reader = new BasicTable.Reader(path, conf);
+    reader.setProjection(projection1);
+    List<RangeSplit> splits = reader.rangeSplit(1);
+    TableScanner scanner = reader.getScanner(splits.get(0), true);
+    BytesWritable key = new BytesWritable();
+    Tuple RowValue = TypesUtils.createTuple(scanner.getSchema());
+
+    scanner.getKey(key);
+    Assert.assertEquals(key, new BytesWritable("k11".getBytes()));
+    scanner.getValue(RowValue);
+    Assert.assertEquals(1, ((Map) (RowValue.get(0))).get("a"));
+    Assert.assertEquals(2, ((Map) RowValue.get(0)).get("b"));
+    Assert.assertEquals(1.3F, ((Map<String, Float>) RowValue.get(1)).get("x"));
+
+    scanner.advance();
+    scanner.getKey(key);
+    Assert.assertEquals(key, new BytesWritable("k12".getBytes()));
+    scanner.getValue(RowValue);
+    Assert.assertEquals(null, ((Map) RowValue.get(0)).get("a"));
+    Assert.assertEquals(null, ((Map) RowValue.get(0)).get("b"));
+    Assert.assertEquals(null, ((Map) RowValue.get(0)).get("x"));
+    Assert.assertEquals(null, ((Map) RowValue.get(0)).get("y"));
+    Assert.assertEquals(null, ((Map) RowValue.get(0)).get("c"));
+    Assert.assertEquals(2.3F, ((Map<String, Float>) RowValue.get(1)).get("y"));
+    reader.close();
+  }
+
+  /*
+   * String STR_SCHEMA =
+   * "r1:record(f1:int, f2:long), r2:record(r3:record(f3:map(float), f4:map(int)))";
+   * String STR_STORAGE =
+   * "[r1.f1, r1.f2]; [r2.r3.f4]; [r2.r3.f3#{x|y}, r2.r3.f4#{a|c}]";
+   */
+  // test stitch: r1.f1 and r2.r3.f3 are read from different column groups
+  @Test
+  public void testReadRecordOfRecord2() throws IOException, ParseException {
+    String projection2 = "r1.f1, r2.r3.f3";
+    BasicTable.Reader reader = new BasicTable.Reader(path, conf);
+    reader.setProjection(projection2);
+    List<RangeSplit> splits = reader.rangeSplit(1);
+    TableScanner scanner = reader.getScanner(splits.get(0), true);
+    BytesWritable key = new BytesWritable();
+    Tuple RowValue = TypesUtils.createTuple(scanner.getSchema());
+
+    scanner.getKey(key);
+    Assert.assertEquals(key, new BytesWritable("k11".getBytes()));
+    scanner.getValue(RowValue);
+    Assert.assertEquals(1, RowValue.get(0));
+    Assert.assertEquals(1.3F, ((Map<String, Float>) RowValue.get(1)).get("x"));
+
+    scanner.advance();
+
+    scanner.getKey(key);
+    Assert.assertEquals(key, new BytesWritable("k12".getBytes()));
+    scanner.getValue(RowValue);
+    Assert.assertEquals(2, RowValue.get(0));
+    Assert.assertEquals(2.3F, ((Map<String, Float>) RowValue.get(1)).get("y"));
+    reader.close();
+  }
+
+  /*
+   * String STR_SCHEMA =
+   * "r1:record(f1:int, f2:long), r2:record(r3:record(f3:map(float), f4:map(int)))";
+   * String STR_STORAGE =
+   * "[r1.f1, r1.f2]; [r2.r3.f4]; [r2.r3.f3#{x|y}, r2.r3.f4#{a|c}]";
+   */
+  // test stitch: full records r1 and r2 plus keyed map projection r2.r3.f3#{y|x}
+  @Test
+  public void testReadRecordOfRecord3() throws IOException, ParseException {
+    String projection3 = "r1, r2, r2.r3.f3#{y|x}";
+    BasicTable.Reader reader = new BasicTable.Reader(path, conf);
+    reader.setProjection(projection3);
+    List<RangeSplit> splits = reader.rangeSplit(1);
+    TableScanner scanner = reader.getScanner(splits.get(0), true);
+    BytesWritable key = new BytesWritable();
+    Tuple RowValue = TypesUtils.createTuple(scanner.getSchema());
+
+    scanner.getKey(key);
+    Assert.assertEquals(key, new BytesWritable("k11".getBytes()));
+    scanner.getValue(RowValue);
+
+    Assert.assertEquals(1, ((Tuple) RowValue.get(0)).get(0));
+    Assert.assertEquals(1.3F, ((Map<String, Float>) RowValue.get(2)).get("x"));
+    Assert.assertEquals(null, ((Map<String, Float>) RowValue.get(2)).get("y"));
+    Assert.assertEquals(null, ((Map<String, Float>) RowValue.get(2)).get("z"));
+    Tuple r2 = (Tuple) RowValue.get(1);
+    Tuple r3 = (Tuple) r2.get(0);
+    Map<String, Integer> f4 = (Map<String, Integer>) r3.get(1);
+    Integer tmp = 1;
+    Assert.assertEquals(tmp, f4.get("a"));
+    tmp = 2;
+    Assert.assertEquals(tmp, f4.get("b"));
+    tmp = 3;
+    Assert.assertEquals(tmp, f4.get("c"));
+
+    scanner.advance();
+
+    scanner.getKey(key);
+    Assert.assertEquals(key, new BytesWritable("k12".getBytes()));
+    scanner.getValue(RowValue);
+    Assert.assertEquals(2, ((Tuple) RowValue.get(0)).get(0));
+    Assert.assertEquals(null, ((Map<String, Float>) RowValue.get(2)).get("x"));
+    Assert.assertEquals(2.3F, ((Map<String, Float>) RowValue.get(2)).get("y"));
+    Assert.assertEquals(null, ((Map<String, Float>) RowValue.get(2)).get("z"));
+    r2 = (Tuple) RowValue.get(1);
+    r3 = (Tuple) r2.get(0);
+    f4 = (Map<String, Integer>) r3.get(1);
+    tmp = 11;
+    Assert.assertEquals(tmp, f4.get("x"));
+    tmp = 12;
+    Assert.assertEquals(tmp, f4.get("y"));
+    tmp = 13;
+    Assert.assertEquals(tmp, f4.get("c"));
+
+    reader.close();
+  }
+
+  /*
+   * String STR_SCHEMA =
+   * "r1:record(f1:int, f2:long), r2:record(r3:record(f3:map(float), f4:map(int)))";
+   * String STR_STORAGE =
+   * "[r1.f1, r1.f2]; [r2.r3.f4]; [r2.r3.f3#{x|y}, r2.r3.f4#{a|c}]";
+   */
+  // test read of map field r2.r3.f4, whose keys are split across column groups
+  @Test
+  public void testReadRecordOfRecord4() throws IOException, ParseException {
+    String projection3 = "r2.r3.f4";
+    BasicTable.Reader reader = new BasicTable.Reader(path, conf);
+    reader.setProjection(projection3);
+    List<RangeSplit> splits = reader.rangeSplit(1);
+    TableScanner scanner = reader.getScanner(splits.get(0), true);
+    BytesWritable key = new BytesWritable();
+    Tuple RowValue = TypesUtils.createTuple(scanner.getSchema());
+
+    scanner.getKey(key);
+    Assert.assertEquals(key, new BytesWritable("k11".getBytes()));
+    scanner.getValue(RowValue);
+
+    Map<String, Integer> f4 = (Map<String, Integer>) RowValue.get(0);
+    Integer tmp = 1;
+    Assert.assertEquals(tmp, f4.get("a"));
+    tmp = 2;
+    Assert.assertEquals(tmp, f4.get("b"));
+    tmp = 3;
+    Assert.assertEquals(tmp, f4.get("c"));
+    Assert.assertEquals(null, f4.get("x"));
+
+    scanner.advance();
+
+    scanner.getKey(key);
+    Assert.assertEquals(key, new BytesWritable("k12".getBytes()));
+    scanner.getValue(RowValue);
+
+    f4 = (Map<String, Integer>) RowValue.get(0);
+    tmp = 11;
+    Assert.assertEquals(tmp, f4.get("x"));
+    tmp = 12;
+    Assert.assertEquals(tmp, f4.get("y"));
+    tmp = 13;
+    Assert.assertEquals(tmp, f4.get("c"));
+    Assert.assertEquals(null, f4.get("a"));
+    reader.close();
+  }
+
+  // test projection, negative: non-existent column name. FAILED: it crashes
+  // here
+  @Test
+  public void testReadNegative1() throws IOException, ParseException {
+    String projection4 = "r3";
+    BasicTable.Reader reader = new BasicTable.Reader(path, conf);
+    reader.setProjection(projection4);
+    List<RangeSplit> splits = reader.rangeSplit(1);
+    TableScanner scanner = reader.getScanner(splits.get(0), true);
+    BytesWritable key = new BytesWritable();
+    Tuple RowValue = TypesUtils.createTuple(scanner.getSchema());
+    scanner.getKey(key);
+    Assert.assertEquals(key, new BytesWritable("k11".getBytes()));
+    scanner.getValue(RowValue);
+    Assert.assertEquals(null, RowValue.get(0));
+    reader.close();
+
+  }
+}

Added: hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/io/TestRecordMap.java
URL: http://svn.apache.org/viewvc/hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/io/TestRecordMap.java?rev=803312&view=auto
==============================================================================
--- hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/io/TestRecordMap.java (added)
+++ hadoop/pig/trunk/contrib/zebra/src/test/org/apache/hadoop/zebra/io/TestRecordMap.java Tue Aug 11 22:27:44 2009
@@ -0,0 +1,391 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.hadoop.zebra.io;
+
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+import java.io.PrintWriter;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Random;
+import java.util.StringTokenizer;
+
+import junit.framework.Assert;
+import junit.framework.TestCase;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.LocalFileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.RawLocalFileSystem;
+import org.apache.hadoop.io.BytesWritable;
+import org.apache.hadoop.zebra.io.BasicTable;
+import org.apache.hadoop.zebra.io.TableInserter;
+import org.apache.hadoop.zebra.io.TableScanner;
+import org.apache.hadoop.zebra.io.BasicTable.Reader.RangeSplit;
+import org.apache.hadoop.zebra.types.ParseException;
+import org.apache.hadoop.zebra.types.Projection;
+import org.apache.hadoop.zebra.types.Schema;
+import org.apache.hadoop.zebra.types.TypesUtils;
+import org.apache.pig.backend.executionengine.ExecException;
+import org.apache.pig.data.DataBag;
+import org.apache.pig.data.DataByteArray;
+import org.apache.pig.data.Tuple;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+/**
+ * 
+ * Test projections on complicated column types.
+ * 
+ */
+public class TestRecordMap {
+  static private Configuration conf;
+  static private Path path;
+  static private FileSystem fs;
+  static int count;
+  static String STR_SCHEMA = "r1:record(f1:int, f2:long), r2:record(r3:record(f3:float, f4:map(int)))";
+  static String STR_STORAGE = "[r1.f1]; [r2.r3.f4]; [r1.f2, r2.r3.f3, r2.r3.f4#{a|c}]";
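+  // The stitch tests below project fields that live in different column
+  // groups (e.g. r1.f1 from the first group, r2.r3.f3 from the third) and
+  // rely on the reader to reassemble them into a single row tuple.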
+
+  @BeforeClass
+  public static void setUpOnce() throws IOException {
+    conf = new Configuration();
+    conf.setInt("table.output.tfile.minBlock.size", 64 * 1024);
+    conf.setInt("table.input.split.minSize", 64 * 1024);
+    conf.set("table.output.tfile.compression", "none");
+
+    RawLocalFileSystem rawLFS = new RawLocalFileSystem();
+    fs = new LocalFileSystem(rawLFS);
+    path = new Path(fs.getWorkingDirectory(), "TestRecordMap");
+    fs = path.getFileSystem(conf);
+    // drop any previous tables
+    BasicTable.drop(path, conf);
+    // Build Table and column groups
+    BasicTable.Writer writer = new BasicTable.Writer(path, STR_SCHEMA,
+        STR_STORAGE, false, conf);
+    writer.finish();
+    Schema schema = writer.getSchema();
+    Tuple tuple = TypesUtils.createTuple(schema);
+    BasicTable.Writer writer1 = new BasicTable.Writer(path, conf);
+    int part = 0;
+    TableInserter inserter = writer1.getInserter("part" + part, true);
+    TypesUtils.resetTuple(tuple);
+
+    Tuple tupRecord1;
+    try {
+      tupRecord1 = TypesUtils.createTuple(schema.getColumnSchema("r1")
+          .getSchema());
+    } catch (ParseException e) {
+      e.printStackTrace();
+      throw new IOException(e);
+    }
+
+    Tuple tupRecord2;
+    try {
+      tupRecord2 = TypesUtils.createTuple(schema.getColumnSchema("r2")
+          .getSchema());
+    } catch (ParseException e) {
+      e.printStackTrace();
+      throw new IOException(e);
+    }
+
+    Tuple tupRecord3;
+    try {
+      tupRecord3 = TypesUtils.createTuple(new Schema("f3:float, f4:map(int)"));
+    } catch (ParseException e) {
+      e.printStackTrace();
+      throw new IOException(e);
+    }
+    // insert data in row 1
+    int row = 0;
+    // r1:record(f1:int, f2:long)
+    tupRecord1.set(0, 1);
+    tupRecord1.set(1, 1001L);
+    tuple.set(0, tupRecord1);
+
+    // r2:record(r3:record(f3:float, f4:map(int)))
+    HashMap<String, Integer> map = new HashMap<String, Integer>();
+    tupRecord2.set(0, tupRecord3);
+    tupRecord3.set(0, 1.3F);
+    map.put("a", 1);
+    map.put("b", 2);
+    map.put("c", 3);
+    tupRecord3.set(1, map);
+    tuple.set(1, tupRecord2);
+    inserter.insert(new BytesWritable(String.format("k%d%d", part + 1, row + 1)
+        .getBytes()), tuple);
+
+    // row 2
+    row++;
+    TypesUtils.resetTuple(tuple);
+    TypesUtils.resetTuple(tupRecord1);
+    TypesUtils.resetTuple(tupRecord2);
+    TypesUtils.resetTuple(tupRecord3);
+    // r1:record(f1:int, f2:long)
+    tupRecord1.set(0, 2);
+    tupRecord1.set(1, 1002L);
+    tuple.set(0, tupRecord1);
+
+    // r2:record(r3:record(f3:float, f4:map(int)))
+    tupRecord2.set(0, tupRecord3);
+    map.clear();
+    map.put("x", 11);
+    map.put("y", 12);
+    map.put("c", 13);
+
+    tupRecord3.set(0, 2.3F);
+    tupRecord3.set(1, map);
+    tuple.set(1, tupRecord2);
+
+    inserter.insert(new BytesWritable(String.format("k%d%d", part + 1, row + 1)
+        .getBytes()), tuple);
+
+    // finish building table, closing out the inserter, writer, writer1
+    inserter.close();
+    writer1.finish();
+    writer.close();
+  }
+
+  @AfterClass
+  public static void tearDownOnce() throws IOException {
+    BasicTable.drop(path, conf);
+  }
+
+  @Test
+  public void testReadSimpleRecord() throws IOException, ParseException {
+    // Start reading with simple projection0; no record-of-record nesting involved. PASS
+    String projection0 = "r1.f1, r1.f2";
+    BasicTable.Reader reader = new BasicTable.Reader(path, conf);
+    reader.setProjection(projection0);
+    List<RangeSplit> splits = reader.rangeSplit(1);
+    TableScanner scanner = reader.getScanner(splits.get(0), true);
+    BytesWritable key = new BytesWritable();
+    Tuple RowValue = TypesUtils.createTuple(scanner.getSchema());
+
+    scanner.getKey(key);
+    Assert.assertEquals(key, new BytesWritable("k11".getBytes()));
+    scanner.getValue(RowValue);
+    Assert.assertEquals(1, RowValue.get(0));
+    Assert.assertEquals(1001L, RowValue.get(1));
+
+    scanner.advance();
+    scanner.getKey(key);
+    Assert.assertEquals(key, new BytesWritable("k12".getBytes()));
+    scanner.getValue(RowValue);
+    Assert.assertEquals(2, RowValue.get(0));
+    Assert.assertEquals(1002L, RowValue.get(1));
+
+    reader.close();
+
+  }
+
+  /*
+   * String STR_SCHEMA =
+   * "r1:record(f1:int, f2:long), r2:record(r3:record(f3:float, f4:map(int)))";
+   * String STR_STORAGE =
+   * "[r1.f1]; [r2.r3.f4]; [r1.f2, r2.r3.f3, r2.r3.f4#{a|c}]";
+   */
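+  // Keyed map projection "r2.r3.f4#{a|b}" restricts the returned map to keys
+  // a and b; unprojected or absent keys read back as null, as asserted below.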
+  @Test
+  public void testReadRecordOfMap1() throws IOException, ParseException {
+    String projection1 = "r2.r3.f4#{a|b}, r2.r3.f3";
+    BasicTable.Reader reader = new BasicTable.Reader(path, conf);
+    reader.setProjection(projection1);
+    List<RangeSplit> splits = reader.rangeSplit(1);
+    TableScanner scanner = reader.getScanner(splits.get(0), true);
+    BytesWritable key = new BytesWritable();
+    Tuple RowValue = TypesUtils.createTuple(scanner.getSchema());
+
+    scanner.getKey(key);
+    Assert.assertEquals(key, new BytesWritable("k11".getBytes()));
+    scanner.getValue(RowValue);
+    Assert.assertEquals(1, ((Map) (RowValue.get(0))).get("a"));
+    Assert.assertEquals(2, ((Map) RowValue.get(0)).get("b"));
+    Assert.assertEquals(1.3F, RowValue.get(1));
+
+    scanner.advance();
+    scanner.getKey(key);
+    Assert.assertEquals(key, new BytesWritable("k12".getBytes()));
+    scanner.getValue(RowValue);
+    Assert.assertEquals(null, ((Map) RowValue.get(0)).get("a"));
+    Assert.assertEquals(null, ((Map) RowValue.get(0)).get("b"));
+    Assert.assertEquals(null, ((Map) RowValue.get(0)).get("x"));
+    Assert.assertEquals(null, ((Map) RowValue.get(0)).get("y"));
+    Assert.assertEquals(null, ((Map) RowValue.get(0)).get("c"));
+    Assert.assertEquals(2.3F, RowValue.get(1));
+    reader.close();
+  }
+
+  /*
+   * String STR_SCHEMA =
+   * "r1:record(f1:int, f2:long), r2:record(r3:record(f3:float, f4:map(int)))";
+   * String STR_STORAGE =
+   * "[r1.f1]; [r2.r3.f4]; [r1.f2, r2.r3.f3, r2.r3.f4#{a|c}]";
+   */
+  // test stitch, [r1.f1]; [r2.r3.f4]; [r1.f2, r2.r3.f3]
+  @Test
+  public void testReadRecordOfRecord2() throws IOException, ParseException {
+    String projection2 = "r1.f1, r2.r3.f3";
+    BasicTable.Reader reader = new BasicTable.Reader(path, conf);
+    reader.setProjection(projection2);
+    List<RangeSplit> splits = reader.rangeSplit(1);
+    TableScanner scanner = reader.getScanner(splits.get(0), true);
+    BytesWritable key = new BytesWritable();
+    Tuple RowValue = TypesUtils.createTuple(scanner.getSchema());
+
+    scanner.getKey(key);
+    Assert.assertEquals(key, new BytesWritable("k11".getBytes()));
+    scanner.getValue(RowValue);
+    Assert.assertEquals(1, RowValue.get(0));
+    Assert.assertEquals(1.3F, RowValue.get(1));
+
+    scanner.advance();
+
+    scanner.getKey(key);
+    Assert.assertEquals(key, new BytesWritable("k12".getBytes()));
+    scanner.getValue(RowValue);
+    Assert.assertEquals(2, RowValue.get(0));
+    Assert.assertEquals(2.3F, RowValue.get(1));
+    reader.close();
+  }
+
+  /*
+   * String STR_SCHEMA =
+   * "r1:record(f1:int, f2:long), r2:record(r3:record(f3:float, f4:map(int)))";
+   * String STR_STORAGE =
+   * "[r1.f1]; [r2.r3.f4]; [r1.f2, r2.r3.f3, r2.r3.f4#{a|c}]";
+   */
+  // test stitch, [r1.f1]; [r2.r3.f4]; [r1.f2, r2.r3.f3]
+  @Test
+  public void testReadRecordOfRecord3() throws IOException, ParseException {
+    String projection3 = "r1, r2";
+    BasicTable.Reader reader = new BasicTable.Reader(path, conf);
+    reader.setProjection(projection3);
+    List<RangeSplit> splits = reader.rangeSplit(1);
+    TableScanner scanner = reader.getScanner(splits.get(0), true);
+    BytesWritable key = new BytesWritable();
+    Tuple RowValue = TypesUtils.createTuple(scanner.getSchema());
+
+    scanner.getKey(key);
+    Assert.assertEquals(key, new BytesWritable("k11".getBytes()));
+    scanner.getValue(RowValue);
+
+    Assert.assertEquals(1, ((Tuple) RowValue.get(0)).get(0));
+    Assert.assertEquals(1001L, ((Tuple) RowValue.get(0)).get(1));
+    Tuple r2 = (Tuple) RowValue.get(1);
+    Tuple r3 = (Tuple) r2.get(0);
+    Map<String, Integer> f4 = (Map<String, Integer>) r3.get(1);
+    Integer tmp = 1;
+    Assert.assertEquals(tmp, f4.get("a"));
+    tmp = 2;
+    Assert.assertEquals(tmp, f4.get("b"));
+    tmp = 3;
+    Assert.assertEquals(tmp, f4.get("c"));
+    Assert.assertEquals(1.3F, ((Tuple) ((Tuple) RowValue.get(1)).get(0)).get(0));
+
+    scanner.advance();
+
+    scanner.getKey(key);
+    Assert.assertEquals(key, new BytesWritable("k12".getBytes()));
+    scanner.getValue(RowValue);
+    Assert.assertEquals(2, ((Tuple) RowValue.get(0)).get(0));
+    Assert.assertEquals(1002L, ((Tuple) RowValue.get(0)).get(1));
+    r2 = (Tuple) RowValue.get(1);
+    r3 = (Tuple) r2.get(0);
+    f4 = (Map<String, Integer>) r3.get(1);
+    tmp = 11;
+    Assert.assertEquals(tmp, f4.get("x"));
+    tmp = 12;
+    Assert.assertEquals(tmp, f4.get("y"));
+    tmp = 13;
+    Assert.assertEquals(tmp, f4.get("c"));
+    Assert.assertEquals(2.3F, ((Tuple) ((Tuple) RowValue.get(1)).get(0)).get(0));
+
+    reader.close();
+  }
+
+  /*
+   * String STR_SCHEMA =
+   * "r1:record(f1:int, f2:long), r2:record(r3:record(f3:float, f4:map(int)))";
+   * String STR_STORAGE =
+   * "[r1.f1]; [r2.r3.f4]; [r1.f2, r2.r3.f3, r2.r3.f4#{a|c}]";
+   */
+  // test read of map field r2.r3.f4, whose keys are split across column groups
+  @Test
+  public void testReadRecordOfRecord4() throws IOException, ParseException {
+    String projection3 = "r2.r3.f4";
+    BasicTable.Reader reader = new BasicTable.Reader(path, conf);
+    reader.setProjection(projection3);
+    List<RangeSplit> splits = reader.rangeSplit(1);
+    TableScanner scanner = reader.getScanner(splits.get(0), true);
+    BytesWritable key = new BytesWritable();
+    Tuple RowValue = TypesUtils.createTuple(scanner.getSchema());
+
+    scanner.getKey(key);
+    Assert.assertEquals(key, new BytesWritable("k11".getBytes()));
+    scanner.getValue(RowValue);
+
+    Map<String, Integer> f4 = (Map<String, Integer>) RowValue.get(0);
+    Integer tmp = 1;
+    Assert.assertEquals(tmp, f4.get("a"));
+    tmp = 2;
+    Assert.assertEquals(tmp, f4.get("b"));
+    tmp = 3;
+    Assert.assertEquals(tmp, f4.get("c"));
+    Assert.assertEquals(null, f4.get("x"));
+
+    scanner.advance();
+
+    scanner.getKey(key);
+    Assert.assertEquals(key, new BytesWritable("k12".getBytes()));
+    scanner.getValue(RowValue);
+
+    f4 = (Map<String, Integer>) RowValue.get(0);
+    tmp = 11;
+    Assert.assertEquals(tmp, f4.get("x"));
+    tmp = 12;
+    Assert.assertEquals(tmp, f4.get("y"));
+    tmp = 13;
+    Assert.assertEquals(tmp, f4.get("c"));
+    Assert.assertEquals(null, f4.get("a"));
+    reader.close();
+  }
+
+  // test projection, negative: non-existent column name
+  @Test
+  public void testReadNegative1() throws IOException, ParseException {
+    String projection4 = "r3";
+    BasicTable.Reader reader = new BasicTable.Reader(path, conf);
+    reader.setProjection(projection4);
+    List<RangeSplit> splits = reader.rangeSplit(1);
+    TableScanner scanner = reader.getScanner(splits.get(0), true);
+    BytesWritable key = new BytesWritable();
+    Tuple RowValue = TypesUtils.createTuple(scanner.getSchema());
+    scanner.getKey(key);
+    Assert.assertEquals(key, new BytesWritable("k11".getBytes()));
+    scanner.getValue(RowValue);
+    Assert.assertEquals(null, RowValue.get(0));
+    reader.close();
+
+  }
+}