Posted to commits@tajo.apache.org by hy...@apache.org on 2014/12/12 06:59:50 UTC

[16/32] tajo git commit: TAJO-1233: Merge hbase_storage branch to the master branch. (Hyoungjun Kim via hyunsik)

http://git-wip-us.apache.org/repos/asf/tajo/blob/4561711f/tajo-storage/src/test/java/org/apache/tajo/storage/index/TestSingleCSVFileBSTIndex.java
----------------------------------------------------------------------
diff --git a/tajo-storage/src/test/java/org/apache/tajo/storage/index/TestSingleCSVFileBSTIndex.java b/tajo-storage/src/test/java/org/apache/tajo/storage/index/TestSingleCSVFileBSTIndex.java
deleted file mode 100644
index c0dda1f..0000000
--- a/tajo-storage/src/test/java/org/apache/tajo/storage/index/TestSingleCSVFileBSTIndex.java
+++ /dev/null
@@ -1,248 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.tajo.storage.index;
-
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.tajo.catalog.*;
-import org.apache.tajo.catalog.proto.CatalogProtos.StoreType;
-import org.apache.tajo.common.TajoDataTypes.Type;
-import org.apache.tajo.conf.TajoConf;
-import org.apache.tajo.conf.TajoConf.ConfVars;
-import org.apache.tajo.datum.DatumFactory;
-import org.apache.tajo.storage.*;
-import org.apache.tajo.storage.fragment.FileFragment;
-import org.apache.tajo.storage.index.bst.BSTIndex;
-import org.apache.tajo.storage.index.bst.BSTIndex.BSTIndexReader;
-import org.apache.tajo.storage.index.bst.BSTIndex.BSTIndexWriter;
-import org.apache.tajo.util.CommonTestingUtil;
-import org.junit.Before;
-import org.junit.Test;
-
-import java.io.IOException;
-
-import static org.apache.tajo.storage.CSVFile.CSVScanner;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-
-public class TestSingleCSVFileBSTIndex {
-  
-  private TajoConf conf;
-  private Schema schema;
-  private TableMeta meta;
-  private FileSystem fs;
-
-  private static final int TUPLE_NUM = 10000;
-  private static final int LOAD_NUM = 100;
-  private static final String TEST_PATH = "target/test-data/TestSingleCSVFileBSTIndex";
-  private Path testDir;
-  
-  public TestSingleCSVFileBSTIndex() {
-    conf = new TajoConf();
-    conf.setVar(ConfVars.ROOT_DIR, TEST_PATH);
-    schema = new Schema();
-    schema.addColumn(new Column("int", Type.INT4));
-    schema.addColumn(new Column("long", Type.INT8));
-    schema.addColumn(new Column("double", Type.FLOAT8));
-    schema.addColumn(new Column("float", Type.FLOAT4));
-    schema.addColumn(new Column("string", Type.TEXT));
-  }
-
-  @Before
-  public void setUp() throws Exception {
-    testDir = CommonTestingUtil.getTestDir(TEST_PATH);
-    fs = testDir.getFileSystem(conf);
-  }
-
-  @Test
-  public void testFindValueInSingleCSV() throws IOException {
-    meta = CatalogUtil.newTableMeta(StoreType.CSV);
-
-    Path tablePath = StorageUtil.concatPath(testDir, "testFindValueInSingleCSV", "table.csv");
-    fs.mkdirs(tablePath.getParent());
-
-    Appender appender = StorageManager.getStorageManager(conf).getAppender(meta, schema, tablePath);
-    appender.init();
-    Tuple tuple;
-    for (int i = 0; i < TUPLE_NUM; i++) {
-      tuple = new VTuple(5);
-      tuple.put(0, DatumFactory.createInt4(i));
-      tuple.put(1, DatumFactory.createInt8(i));
-      tuple.put(2, DatumFactory.createFloat8(i));
-      tuple.put(3, DatumFactory.createFloat4(i));
-      tuple.put(4, DatumFactory.createText("field_" + i));
-      appender.addTuple(tuple);
-    }
-    appender.close();
-
-    FileStatus status = fs.getFileStatus(tablePath);
-    long fileLen = status.getLen();
-    FileFragment tablet = new FileFragment("table1_1", status.getPath(), 0, fileLen);
-
-    SortSpec[] sortKeys = new SortSpec[2];
-    sortKeys[0] = new SortSpec(schema.getColumn("long"), true, false);
-    sortKeys[1] = new SortSpec(schema.getColumn("double"), true, false);
-
-    Schema keySchema = new Schema();
-    keySchema.addColumn(new Column("long", Type.INT8));
-    keySchema.addColumn(new Column("double", Type.FLOAT8));
-
-    BaseTupleComparator comp = new BaseTupleComparator(keySchema, sortKeys);
-
-    BSTIndex bst = new BSTIndex(conf);
-    BSTIndexWriter creator = bst.getIndexWriter(new Path(testDir,
-        "FindValueInCSV.idx"), BSTIndex.TWO_LEVEL_INDEX, keySchema, comp);
-    creator.setLoadNum(LOAD_NUM);
-    creator.open();
-
-    SeekableScanner fileScanner = new CSVScanner(conf, schema, meta, tablet);
-    fileScanner.init();
-    Tuple keyTuple;
-    long offset;
-    while (true) {
-      keyTuple = new VTuple(2);
-      offset = fileScanner.getNextOffset();
-      tuple = fileScanner.next();
-      if (tuple == null)
-        break;
-
-      keyTuple.put(0, tuple.get(1));
-      keyTuple.put(1, tuple.get(2));
-      creator.write(keyTuple, offset);
-    }
-
-    creator.flush();
-    creator.close();
-    fileScanner.close();
-
-    tuple = new VTuple(keySchema.size());
-    BSTIndexReader reader = bst.getIndexReader(new Path(testDir,
-        "FindValueInCSV.idx"), keySchema, comp);
-    reader.open();
-    fileScanner = new CSVScanner(conf, schema, meta, tablet);
-    fileScanner.init();
-    for (int i = 0; i < TUPLE_NUM - 1; i++) {
-      tuple.put(0, DatumFactory.createInt8(i));
-      tuple.put(1, DatumFactory.createFloat8(i));
-      long offsets = reader.find(tuple);
-      fileScanner.seek(offsets);
-      tuple = fileScanner.next();
-      assertEquals(i, (tuple.get(1).asInt8()));
-      assertEquals(i, (tuple.get(2).asFloat8()), 0.01);
-
-      offsets = reader.next();
-      if (offsets == -1) {
-        continue;
-      }
-      fileScanner.seek(offsets);
-      tuple = fileScanner.next();
-      assertTrue("[seek check " + (i + 1) + " ]",
-          (i + 1) == (tuple.get(0).asInt4()));
-      assertTrue("[seek check " + (i + 1) + " ]",
-          (i + 1) == (tuple.get(1).asInt8()));
-    }
-  }
-
-  @Test
-  public void testFindNextKeyValueInSingleCSV() throws IOException {
-    meta = CatalogUtil.newTableMeta(StoreType.CSV);
-
-    Path tablePath = StorageUtil.concatPath(testDir, "testFindNextKeyValueInSingleCSV",
-        "table1.csv");
-    fs.mkdirs(tablePath.getParent());
-    Appender appender = StorageManager.getStorageManager(conf).getAppender(meta, schema, tablePath);
-    appender.init();
-    Tuple tuple;
-    for (int i = 0; i < TUPLE_NUM; i++) {
-      tuple = new VTuple(5);
-      tuple.put(0, DatumFactory.createInt4(i));
-      tuple.put(1, DatumFactory.createInt8(i));
-      tuple.put(2, DatumFactory.createFloat8(i));
-      tuple.put(3, DatumFactory.createFloat4(i));
-      tuple.put(4, DatumFactory.createText("field_" + i));
-      appender.addTuple(tuple);
-    }
-    appender.close();
-
-    FileStatus status = fs.getFileStatus(tablePath);
-    long fileLen = status.getLen();
-    FileFragment tablet = new FileFragment("table1_1", status.getPath(), 0, fileLen);
-    
-    SortSpec [] sortKeys = new SortSpec[2];
-    sortKeys[0] = new SortSpec(schema.getColumn("int"), true, false);
-    sortKeys[1] = new SortSpec(schema.getColumn("long"), true, false);
-
-    Schema keySchema = new Schema();
-    keySchema.addColumn(new Column("int", Type.INT4));
-    keySchema.addColumn(new Column("long", Type.INT8));
-
-    BaseTupleComparator comp = new BaseTupleComparator(keySchema, sortKeys);
-    
-    BSTIndex bst = new BSTIndex(conf);
-    BSTIndexWriter creator = bst.getIndexWriter(new Path(testDir, "FindNextKeyValueInCSV.idx"),
-        BSTIndex.TWO_LEVEL_INDEX, keySchema, comp);
-    creator.setLoadNum(LOAD_NUM);
-    creator.open();
-    
-    SeekableScanner fileScanner  = new CSVScanner(conf, schema, meta, tablet);
-    fileScanner.init();
-    Tuple keyTuple;
-    long offset;
-    while (true) {
-      keyTuple = new VTuple(2);
-      offset = fileScanner.getNextOffset();
-      tuple = fileScanner.next();
-      if (tuple == null) break;
-      
-      keyTuple.put(0, tuple.get(0));
-      keyTuple.put(1, tuple.get(1));
-      creator.write(keyTuple, offset);
-    }
-    
-    creator.flush();
-    creator.close();
-    fileScanner.close();    
-    
-    BSTIndexReader reader = bst.getIndexReader(new Path(testDir, "FindNextKeyValueInCSV.idx"), keySchema, comp);
-    reader.open();
-    fileScanner  = new CSVScanner(conf, schema, meta, tablet);
-    fileScanner.init();
-    Tuple result;
-    for (int i = 0; i < TUPLE_NUM - 1; i++) {
-      keyTuple = new VTuple(2);
-      keyTuple.put(0, DatumFactory.createInt4(i));
-      keyTuple.put(1, DatumFactory.createInt8(i));
-      long offsets = reader.find(keyTuple, true);
-      fileScanner.seek(offsets);
-      result = fileScanner.next();
-      assertTrue("[seek check " + (i + 1) + " ]" , (i + 1) == (result.get(0).asInt4()));
-      assertTrue("[seek check " + (i + 1) + " ]" , (i + 1) == (result.get(1).asInt8()));
-      
-      offsets = reader.next();
-      if (offsets == -1) {
-        continue;
-      }
-      fileScanner.seek(offsets);
-      result = fileScanner.next();
-      assertTrue("[seek check " + (i + 2) + " ]" , (i + 2) == (result.get(0).asInt8()));
-      assertTrue("[seek check " + (i + 2) + " ]" , (i + 2) == (result.get(1).asFloat8()));
-    }
-  }
-}
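
The deleted test above drives the two-level BST index through a write-then-probe cycle: append (key, offset) pairs while scanning the data file, then look keys up and seek the scanner to the returned offsets. A minimal sketch of that flow, reusing only the BSTIndex calls visible in the diff; the method name buildAndProbe, the 100-entry load factor, and the single write() call are illustrative:

    import org.apache.hadoop.fs.Path;
    import org.apache.tajo.catalog.Schema;
    import org.apache.tajo.conf.TajoConf;
    import org.apache.tajo.storage.BaseTupleComparator;
    import org.apache.tajo.storage.Tuple;
    import org.apache.tajo.storage.index.bst.BSTIndex;
    import org.apache.tajo.storage.index.bst.BSTIndex.BSTIndexReader;
    import org.apache.tajo.storage.index.bst.BSTIndex.BSTIndexWriter;

    // Build a two-level BST index mapping key tuples to byte offsets in the
    // data file, then probe it; in practice write() runs once per scanned tuple.
    static long buildAndProbe(TajoConf conf, Path indexPath, Schema keySchema,
                              BaseTupleComparator comp, Tuple key, long offset)
        throws java.io.IOException {
      BSTIndex bst = new BSTIndex(conf);
      BSTIndexWriter writer =
          bst.getIndexWriter(indexPath, BSTIndex.TWO_LEVEL_INDEX, keySchema, comp);
      writer.setLoadNum(100);          // leaf load factor, as in the test above
      writer.open();
      writer.write(key, offset);
      writer.flush();
      writer.close();

      BSTIndexReader reader = bst.getIndexReader(indexPath, keySchema, comp);
      reader.open();
      long found = reader.find(key);   // offset to seek the data-file scanner to
      long next = reader.next();       // offset of the following entry, or -1 at end
      return found;
    }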

http://git-wip-us.apache.org/repos/asf/tajo/blob/4561711f/tajo-storage/src/test/java/org/apache/tajo/storage/json/TestJsonSerDe.java
----------------------------------------------------------------------
diff --git a/tajo-storage/src/test/java/org/apache/tajo/storage/json/TestJsonSerDe.java b/tajo-storage/src/test/java/org/apache/tajo/storage/json/TestJsonSerDe.java
deleted file mode 100644
index 038bc17..0000000
--- a/tajo-storage/src/test/java/org/apache/tajo/storage/json/TestJsonSerDe.java
+++ /dev/null
@@ -1,101 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.tajo.storage.json;
-
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.tajo.catalog.CatalogUtil;
-import org.apache.tajo.catalog.Schema;
-import org.apache.tajo.catalog.TableMeta;
-import org.apache.tajo.catalog.proto.CatalogProtos;
-import org.apache.tajo.common.TajoDataTypes;
-import org.apache.tajo.conf.TajoConf;
-import org.apache.tajo.datum.Datum;
-import org.apache.tajo.datum.DatumFactory;
-import org.apache.tajo.datum.NullDatum;
-import org.apache.tajo.storage.Scanner;
-import org.apache.tajo.storage.StorageManager;
-import org.apache.tajo.storage.Tuple;
-import org.apache.tajo.storage.VTuple;
-import org.apache.tajo.storage.fragment.FileFragment;
-import org.junit.Test;
-
-import java.io.IOException;
-import java.net.URL;
-
-import static org.junit.Assert.*;
-
-public class TestJsonSerDe {
-  private static Schema schema = new Schema();
-
-  static {
-    schema.addColumn("col1", TajoDataTypes.Type.BOOLEAN);
-    schema.addColumn("col2", TajoDataTypes.Type.CHAR, 7);
-    schema.addColumn("col3", TajoDataTypes.Type.INT2);
-    schema.addColumn("col4", TajoDataTypes.Type.INT4);
-    schema.addColumn("col5", TajoDataTypes.Type.INT8);
-    schema.addColumn("col6", TajoDataTypes.Type.FLOAT4);
-    schema.addColumn("col7", TajoDataTypes.Type.FLOAT8);
-    schema.addColumn("col8", TajoDataTypes.Type.TEXT);
-    schema.addColumn("col9", TajoDataTypes.Type.BLOB);
-    schema.addColumn("col10", TajoDataTypes.Type.INET4);
-    schema.addColumn("col11", TajoDataTypes.Type.NULL_TYPE);
-  }
-
-  public static Path getResourcePath(String path, String suffix) {
-    URL resultBaseURL = ClassLoader.getSystemResource(path);
-    return new Path(resultBaseURL.toString(), suffix);
-  }
-
-  @Test
-  public void testVariousType() throws IOException {
-    TajoConf conf = new TajoConf();
-
-    TableMeta meta = CatalogUtil.newTableMeta(CatalogProtos.StoreType.JSON);
-    Path tablePath = new Path(getResourcePath("dataset", "TestJsonSerDe"), "testVariousType.json");
-    FileSystem fs = FileSystem.getLocal(conf);
-    FileStatus status = fs.getFileStatus(tablePath);
-    FileFragment fragment = new FileFragment("table", tablePath, 0, status.getLen());
-    Scanner scanner =  StorageManager.getStorageManager(conf).getScanner(meta, schema, fragment);
-    scanner.init();
-
-    Tuple tuple = scanner.next();
-    assertNotNull(tuple);
-    assertNull(scanner.next());
-    scanner.close();
-
-    Tuple baseTuple = new VTuple(11);
-    baseTuple.put(new Datum[] {
-        DatumFactory.createBool(true),                  // 0
-        DatumFactory.createChar("hyunsik"),             // 1
-        DatumFactory.createInt2((short) 17),            // 2
-        DatumFactory.createInt4(59),                    // 3
-        DatumFactory.createInt8(23l),                   // 4
-        DatumFactory.createFloat4(77.9f),               // 5
-        DatumFactory.createFloat8(271.9d),              // 6
-        DatumFactory.createText("hyunsik"),             // 7
-        DatumFactory.createBlob("hyunsik".getBytes()),  // 8
-        DatumFactory.createInet4("192.168.0.1"),        // 9
-        NullDatum.get(),                                // 10
-    });
-
-    assertEquals(baseTuple, tuple);
-  }
-}
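
The SerDe test above boils down to the generic scan loop: the StorageManager picks the JSON scanner from the StoreType recorded in the TableMeta. A compact sketch under the same pre-removal API (scanAll is a hypothetical helper; conf, meta, schema, and fragment are set up exactly as in the test):

    import org.apache.tajo.catalog.Schema;
    import org.apache.tajo.catalog.TableMeta;
    import org.apache.tajo.conf.TajoConf;
    import org.apache.tajo.storage.Scanner;
    import org.apache.tajo.storage.StorageManager;
    import org.apache.tajo.storage.Tuple;
    import org.apache.tajo.storage.fragment.FileFragment;

    // Read every tuple of a fragment through the StorageManager facade.
    static void scanAll(TajoConf conf, TableMeta meta, Schema schema, FileFragment fragment)
        throws java.io.IOException {
      Scanner scanner = StorageManager.getStorageManager(conf).getScanner(meta, schema, fragment);
      scanner.init();
      Tuple tuple;
      while ((tuple = scanner.next()) != null) {  // next() returns null at end of input
        // consume tuple
      }
      scanner.close();
    }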

http://git-wip-us.apache.org/repos/asf/tajo/blob/4561711f/tajo-storage/src/test/java/org/apache/tajo/storage/parquet/TestReadWrite.java
----------------------------------------------------------------------
diff --git a/tajo-storage/src/test/java/org/apache/tajo/storage/parquet/TestReadWrite.java b/tajo-storage/src/test/java/org/apache/tajo/storage/parquet/TestReadWrite.java
deleted file mode 100644
index 0a01dc4..0000000
--- a/tajo-storage/src/test/java/org/apache/tajo/storage/parquet/TestReadWrite.java
+++ /dev/null
@@ -1,114 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.tajo.storage.parquet;
-
-import com.google.common.base.Charsets;
-
-import java.io.File;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.LocalFileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.tajo.catalog.Column;
-import org.apache.tajo.catalog.Schema;
-import org.apache.tajo.common.TajoDataTypes.Type;
-import org.apache.tajo.datum.DatumFactory;
-import org.apache.tajo.datum.NullDatum;
-import org.apache.tajo.storage.Tuple;
-import org.apache.tajo.storage.VTuple;
-
-import org.junit.Test;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertArrayEquals;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.assertNotNull;
-
-public class TestReadWrite {
-  private static final String HELLO = "hello";
-
-  private Path createTmpFile() throws IOException {
-    File tmp = File.createTempFile(getClass().getSimpleName(), ".tmp");
-    tmp.deleteOnExit();
-    tmp.delete();
-
-    // This prevents accessing the HDFS namenode of TajoTestingCluster.
-    LocalFileSystem localFS = LocalFileSystem.getLocal(new Configuration());
-    return localFS.makeQualified(new Path(tmp.getPath()));
-  }
-
-  private Schema createAllTypesSchema() {
-    List<Column> columns = new ArrayList<Column>();
-    columns.add(new Column("myboolean", Type.BOOLEAN));
-    columns.add(new Column("mybit", Type.BIT));
-    columns.add(new Column("mychar", Type.CHAR));
-    columns.add(new Column("myint2", Type.INT2));
-    columns.add(new Column("myint4", Type.INT4));
-    columns.add(new Column("myint8", Type.INT8));
-    columns.add(new Column("myfloat4", Type.FLOAT4));
-    columns.add(new Column("myfloat8", Type.FLOAT8));
-    columns.add(new Column("mytext", Type.TEXT));
-    columns.add(new Column("myblob", Type.BLOB));
-    columns.add(new Column("mynull", Type.NULL_TYPE));
-    Column[] columnsArray = new Column[columns.size()];
-    columnsArray = columns.toArray(columnsArray);
-    return new Schema(columnsArray);
-  }
-
-  @Test
-  public void testAll() throws Exception {
-    Path file = createTmpFile();
-    Schema schema = createAllTypesSchema();
-    Tuple tuple = new VTuple(schema.size());
-    tuple.put(0, DatumFactory.createBool(true));
-    tuple.put(1, DatumFactory.createBit((byte)128));
-    tuple.put(2, DatumFactory.createChar('t'));
-    tuple.put(3, DatumFactory.createInt2((short)2048));
-    tuple.put(4, DatumFactory.createInt4(4096));
-    tuple.put(5, DatumFactory.createInt8(8192L));
-    tuple.put(6, DatumFactory.createFloat4(0.2f));
-    tuple.put(7, DatumFactory.createFloat8(4.1));
-    tuple.put(8, DatumFactory.createText(HELLO));
-    tuple.put(9, DatumFactory.createBlob(HELLO.getBytes(Charsets.UTF_8)));
-    tuple.put(10, NullDatum.get());
-
-    TajoParquetWriter writer = new TajoParquetWriter(file, schema);
-    writer.write(tuple);
-    writer.close();
-
-    TajoParquetReader reader = new TajoParquetReader(file, schema);
-    tuple = reader.read();
-
-    assertNotNull(tuple);
-    assertEquals(true, tuple.getBool(0));
-    assertEquals((byte)128, tuple.getByte(1));
-    assertTrue(String.valueOf('t').equals(String.valueOf(tuple.getChar(2))));
-    assertEquals((short)2048, tuple.getInt2(3));
-    assertEquals(4096, tuple.getInt4(4));
-    assertEquals(8192L, tuple.getInt8(5));
-    assertEquals(new Float(0.2f), new Float(tuple.getFloat4(6)));
-    assertEquals(new Double(4.1), new Double(tuple.getFloat8(7)));
-    assertTrue(HELLO.equals(tuple.getText(8)));
-    assertArrayEquals(HELLO.getBytes(Charsets.UTF_8), tuple.getBytes(9));
-    assertEquals(NullDatum.get(), tuple.get(10));
-  }
-}
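
Stripped of the per-type assertions, the Parquet test reduces to a write/read round trip. A minimal sketch using the TajoParquetWriter/TajoParquetReader calls from the diff (roundTrip is a hypothetical helper; the reader's close() assumes the Closeable contract of parquet's ParquetReader, which the deleted test does not exercise):

    import org.apache.hadoop.fs.Path;
    import org.apache.tajo.catalog.Schema;
    import org.apache.tajo.storage.Tuple;
    import org.apache.tajo.storage.parquet.TajoParquetReader;
    import org.apache.tajo.storage.parquet.TajoParquetWriter;

    // Write one tuple to a Parquet file and read it back.
    static Tuple roundTrip(Path file, Schema schema, Tuple tuple) throws java.io.IOException {
      TajoParquetWriter writer = new TajoParquetWriter(file, schema);
      writer.write(tuple);
      writer.close();

      TajoParquetReader reader = new TajoParquetReader(file, schema);
      Tuple result = reader.read();   // null once the file is exhausted
      reader.close();
      return result;
    }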

http://git-wip-us.apache.org/repos/asf/tajo/blob/4561711f/tajo-storage/src/test/java/org/apache/tajo/storage/parquet/TestSchemaConverter.java
----------------------------------------------------------------------
diff --git a/tajo-storage/src/test/java/org/apache/tajo/storage/parquet/TestSchemaConverter.java b/tajo-storage/src/test/java/org/apache/tajo/storage/parquet/TestSchemaConverter.java
deleted file mode 100644
index 49a162b..0000000
--- a/tajo-storage/src/test/java/org/apache/tajo/storage/parquet/TestSchemaConverter.java
+++ /dev/null
@@ -1,132 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.tajo.storage.parquet;
-
-import java.util.ArrayList;
-import java.util.List;
-
-import org.apache.tajo.catalog.Schema;
-import org.apache.tajo.catalog.Column;
-import org.apache.tajo.common.TajoDataTypes.Type;
-
-import org.junit.Test;
-
-import parquet.schema.MessageType;
-import parquet.schema.MessageTypeParser;
-
-import static org.junit.Assert.assertEquals;
-
-/**
- * Tests for {@link TajoSchemaConverter}.
- */
-public class TestSchemaConverter {
-  private static final String ALL_PARQUET_SCHEMA =
-      "message table_schema {\n" +
-      "  optional boolean myboolean;\n" +
-      "  optional int32 myint;\n" +
-      "  optional int64 mylong;\n" +
-      "  optional float myfloat;\n" +
-      "  optional double mydouble;\n" +
-      "  optional binary mybytes;\n" +
-      "  optional binary mystring (UTF8);\n" +
-      "  optional fixed_len_byte_array(1) myfixed;\n" +
-      "}\n";
-
-  private static final String CONVERTED_ALL_PARQUET_SCHEMA =
-      "message table_schema {\n" +
-      "  optional boolean myboolean;\n" +
-      "  optional int32 mybit;\n" +
-      "  optional binary mychar (UTF8);\n" +
-      "  optional int32 myint2;\n" +
-      "  optional int32 myint4;\n" +
-      "  optional int64 myint8;\n" +
-      "  optional float myfloat4;\n" +
-      "  optional double myfloat8;\n" +
-      "  optional binary mytext (UTF8);\n" +
-      "  optional binary myblob;\n" +
-      // NULL_TYPE fields are not encoded.
-      "  optional binary myinet4;\n" +
-      "  optional binary myprotobuf;\n" +
-      "}\n";
-
-  private Schema createAllTypesSchema() {
-    List<Column> columns = new ArrayList<Column>();
-    columns.add(new Column("myboolean", Type.BOOLEAN));
-    columns.add(new Column("mybit", Type.BIT));
-    columns.add(new Column("mychar", Type.CHAR));
-    columns.add(new Column("myint2", Type.INT2));
-    columns.add(new Column("myint4", Type.INT4));
-    columns.add(new Column("myint8", Type.INT8));
-    columns.add(new Column("myfloat4", Type.FLOAT4));
-    columns.add(new Column("myfloat8", Type.FLOAT8));
-    columns.add(new Column("mytext", Type.TEXT));
-    columns.add(new Column("myblob", Type.BLOB));
-    columns.add(new Column("mynull", Type.NULL_TYPE));
-    columns.add(new Column("myinet4", Type.INET4));
-    columns.add(new Column("myprotobuf", Type.PROTOBUF));
-    Column[] columnsArray = new Column[columns.size()];
-    columnsArray = columns.toArray(columnsArray);
-    return new Schema(columnsArray);
-  }
-
-  private Schema createAllTypesConvertedSchema() {
-    List<Column> columns = new ArrayList<Column>();
-    columns.add(new Column("myboolean", Type.BOOLEAN));
-    columns.add(new Column("myint", Type.INT4));
-    columns.add(new Column("mylong", Type.INT8));
-    columns.add(new Column("myfloat", Type.FLOAT4));
-    columns.add(new Column("mydouble", Type.FLOAT8));
-    columns.add(new Column("mybytes", Type.BLOB));
-    columns.add(new Column("mystring", Type.TEXT));
-    columns.add(new Column("myfixed", Type.BLOB));
-    Column[] columnsArray = new Column[columns.size()];
-    columnsArray = columns.toArray(columnsArray);
-    return new Schema(columnsArray);
-  }
-
-  private void testTajoToParquetConversion(
-      Schema tajoSchema, String schemaString) throws Exception {
-    TajoSchemaConverter converter = new TajoSchemaConverter();
-    MessageType schema = converter.convert(tajoSchema);
-    MessageType expected = MessageTypeParser.parseMessageType(schemaString);
-    assertEquals("converting " + schema + " to " + schemaString,
-                 expected.toString(), schema.toString());
-  }
-
-  private void testParquetToTajoConversion(
-      Schema tajoSchema, String schemaString) throws Exception {
-    TajoSchemaConverter converter = new TajoSchemaConverter();
-    Schema schema = converter.convert(
-        MessageTypeParser.parseMessageType(schemaString));
-    assertEquals("converting " + schemaString + " to " + tajoSchema,
-                 tajoSchema.toString(), schema.toString());
-  }
-
-  @Test
-  public void testAllTypesToParquet() throws Exception {
-    Schema schema = createAllTypesSchema();
-    testTajoToParquetConversion(schema, CONVERTED_ALL_PARQUET_SCHEMA);
-  }
-
-  @Test
-  public void testAllTypesToTajo() throws Exception {
-    Schema schema = createAllTypesConvertedSchema();
-    testParquetToTajoConversion(schema, ALL_PARQUET_SCHEMA);
-  }
-}
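
Both conversion directions in the test above hinge on one TajoSchemaConverter instance. A sketch of the two convert() overloads it exercises (toParquet and toTajo are hypothetical wrapper names):

    import org.apache.tajo.catalog.Schema;
    import org.apache.tajo.storage.parquet.TajoSchemaConverter;
    import parquet.schema.MessageType;
    import parquet.schema.MessageTypeParser;

    // Tajo -> Parquet: every column becomes an optional Parquet field;
    // NULL_TYPE columns are not encoded (see CONVERTED_ALL_PARQUET_SCHEMA above).
    static MessageType toParquet(Schema tajoSchema) {
      return new TajoSchemaConverter().convert(tajoSchema);
    }

    // Parquet -> Tajo: parse the textual message type first, then convert.
    static Schema toTajo(String parquetSchemaString) {
      return new TajoSchemaConverter().convert(
          MessageTypeParser.parseMessageType(parquetSchemaString));
    }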

http://git-wip-us.apache.org/repos/asf/tajo/blob/4561711f/tajo-storage/src/test/java/org/apache/tajo/tuple/TestBaseTupleBuilder.java
----------------------------------------------------------------------
diff --git a/tajo-storage/src/test/java/org/apache/tajo/tuple/TestBaseTupleBuilder.java b/tajo-storage/src/test/java/org/apache/tajo/tuple/TestBaseTupleBuilder.java
deleted file mode 100644
index b332364..0000000
--- a/tajo-storage/src/test/java/org/apache/tajo/tuple/TestBaseTupleBuilder.java
+++ /dev/null
@@ -1,76 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.tajo.tuple;
-
-import org.apache.tajo.storage.RowStoreUtil;
-import org.apache.tajo.tuple.offheap.*;
-import org.junit.Test;
-
-public class TestBaseTupleBuilder {
-
-  @Test
-  public void testBuild() {
-    BaseTupleBuilder builder = new BaseTupleBuilder(TestOffHeapRowBlock.schema);
-
-    OffHeapRowBlock rowBlock = TestOffHeapRowBlock.createRowBlock(10248);
-    OffHeapRowBlockReader reader = rowBlock.getReader();
-
-    ZeroCopyTuple inputTuple = new ZeroCopyTuple();
-
-    HeapTuple heapTuple = null;
-    ZeroCopyTuple zcTuple = null;
-    int i = 0;
-    while(reader.next(inputTuple)) {
-      RowStoreUtil.convert(inputTuple, builder);
-
-      heapTuple = builder.buildToHeapTuple();
-      TestOffHeapRowBlock.validateTupleResult(i, heapTuple);
-
-      zcTuple = builder.buildToZeroCopyTuple();
-      TestOffHeapRowBlock.validateTupleResult(i, zcTuple);
-
-      i++;
-    }
-  }
-
-  @Test
-  public void testBuildWithNull() {
-    BaseTupleBuilder builder = new BaseTupleBuilder(TestOffHeapRowBlock.schema);
-
-    OffHeapRowBlock rowBlock = TestOffHeapRowBlock.createRowBlockWithNull(10248);
-    OffHeapRowBlockReader reader = rowBlock.getReader();
-
-    ZeroCopyTuple inputTuple = new ZeroCopyTuple();
-
-    HeapTuple heapTuple = null;
-    ZeroCopyTuple zcTuple = null;
-    int i = 0;
-    while(reader.next(inputTuple)) {
-      RowStoreUtil.convert(inputTuple, builder);
-
-      heapTuple = builder.buildToHeapTuple();
-      TestOffHeapRowBlock.validateNullity(i, heapTuple);
-
-      zcTuple = builder.buildToZeroCopyTuple();
-      TestOffHeapRowBlock.validateNullity(i, zcTuple);
-
-      i++;
-    }
-  }
-}
\ No newline at end of file
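
The builder test's core loop copies rows out of an OffHeapRowBlock and materializes each one twice. A condensed sketch using the calls shown in the diff (rebuildAll is a hypothetical helper; schema and rowBlock correspond to TestOffHeapRowBlock.schema and createRowBlock() below):

    import org.apache.tajo.catalog.Schema;
    import org.apache.tajo.storage.RowStoreUtil;
    import org.apache.tajo.tuple.BaseTupleBuilder;
    import org.apache.tajo.tuple.offheap.HeapTuple;
    import org.apache.tajo.tuple.offheap.OffHeapRowBlock;
    import org.apache.tajo.tuple.offheap.OffHeapRowBlockReader;
    import org.apache.tajo.tuple.offheap.ZeroCopyTuple;

    // Copy each off-heap row into the builder, then materialize it twice:
    // once as an on-heap tuple and once as a zero-copy view.
    static void rebuildAll(Schema schema, OffHeapRowBlock rowBlock) {
      BaseTupleBuilder builder = new BaseTupleBuilder(schema);
      OffHeapRowBlockReader reader = rowBlock.getReader();
      ZeroCopyTuple input = new ZeroCopyTuple();
      while (reader.next(input)) {
        RowStoreUtil.convert(input, builder);                 // field-by-field copy
        HeapTuple onHeap = builder.buildToHeapTuple();        // detached copy
        ZeroCopyTuple view = builder.buildToZeroCopyTuple();  // view over builder memory
      }
    }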

http://git-wip-us.apache.org/repos/asf/tajo/blob/4561711f/tajo-storage/src/test/java/org/apache/tajo/tuple/offheap/TestHeapTuple.java
----------------------------------------------------------------------
diff --git a/tajo-storage/src/test/java/org/apache/tajo/tuple/offheap/TestHeapTuple.java b/tajo-storage/src/test/java/org/apache/tajo/tuple/offheap/TestHeapTuple.java
deleted file mode 100644
index 96f465a..0000000
--- a/tajo-storage/src/test/java/org/apache/tajo/tuple/offheap/TestHeapTuple.java
+++ /dev/null
@@ -1,45 +0,0 @@
-/***
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.tajo.tuple.offheap;
-
-import org.apache.tajo.catalog.SchemaUtil;
-import org.junit.Test;
-
-public class TestHeapTuple {
-
-  @Test
-  public void testHeapTuple() {
-    OffHeapRowBlock rowBlock = TestOffHeapRowBlock.createRowBlock(1024);
-
-    OffHeapRowBlockReader reader = new OffHeapRowBlockReader(rowBlock);
-
-    ZeroCopyTuple zcTuple = new ZeroCopyTuple();
-    int i = 0;
-    while (reader.next(zcTuple)) {
-      byte [] bytes = new byte[zcTuple.nioBuffer().limit()];
-      zcTuple.nioBuffer().get(bytes);
-
-      HeapTuple heapTuple = new HeapTuple(bytes, SchemaUtil.toDataTypes(TestOffHeapRowBlock.schema));
-      TestOffHeapRowBlock.validateTupleResult(i, heapTuple);
-      i++;
-    }
-
-    rowBlock.release();
-  }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/tajo/blob/4561711f/tajo-storage/src/test/java/org/apache/tajo/tuple/offheap/TestOffHeapRowBlock.java
----------------------------------------------------------------------
diff --git a/tajo-storage/src/test/java/org/apache/tajo/tuple/offheap/TestOffHeapRowBlock.java b/tajo-storage/src/test/java/org/apache/tajo/tuple/offheap/TestOffHeapRowBlock.java
deleted file mode 100644
index c43ba38..0000000
--- a/tajo-storage/src/test/java/org/apache/tajo/tuple/offheap/TestOffHeapRowBlock.java
+++ /dev/null
@@ -1,577 +0,0 @@
-/***
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.tajo.tuple.offheap;
-
-import com.google.common.collect.Lists;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.tajo.catalog.*;
-import org.apache.tajo.common.TajoDataTypes;
-import org.apache.tajo.datum.DatumFactory;
-import org.apache.tajo.datum.ProtobufDatum;
-import org.apache.tajo.rpc.protocolrecords.PrimitiveProtos;
-import org.apache.tajo.storage.BaseTupleComparator;
-import org.apache.tajo.storage.RowStoreUtil;
-import org.apache.tajo.storage.Tuple;
-import org.apache.tajo.storage.VTuple;
-import org.apache.tajo.unit.StorageUnit;
-import org.apache.tajo.util.FileUtil;
-import org.apache.tajo.util.ProtoUtil;
-import org.junit.Test;
-
-import java.nio.ByteBuffer;
-import java.util.Collections;
-import java.util.List;
-
-import static org.apache.tajo.common.TajoDataTypes.Type;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-
-public class TestOffHeapRowBlock {
-  private static final Log LOG = LogFactory.getLog(TestOffHeapRowBlock.class);
-  public static String UNICODE_FIELD_PREFIX = "abc_가나다_";
-  public static Schema schema;
-
-  static {
-    schema = new Schema();
-    schema.addColumn("col0", Type.BOOLEAN);
-    schema.addColumn("col1", Type.INT2);
-    schema.addColumn("col2", Type.INT4);
-    schema.addColumn("col3", Type.INT8);
-    schema.addColumn("col4", Type.FLOAT4);
-    schema.addColumn("col5", Type.FLOAT8);
-    schema.addColumn("col6", Type.TEXT);
-    schema.addColumn("col7", Type.TIMESTAMP);
-    schema.addColumn("col8", Type.DATE);
-    schema.addColumn("col9", Type.TIME);
-    schema.addColumn("col10", Type.INTERVAL);
-    schema.addColumn("col11", Type.INET4);
-    schema.addColumn("col12",
-        CatalogUtil.newDataType(TajoDataTypes.Type.PROTOBUF, PrimitiveProtos.StringProto.class.getName()));
-  }
-
-  private void explainRowBlockAllocation(OffHeapRowBlock rowBlock, long startTime, long endTime) {
-    LOG.info(FileUtil.humanReadableByteCount(rowBlock.size(), true) + " bytes allocated in "
-        + (endTime - startTime) + " msec");
-  }
-
-  @Test
-  public void testPutAndReadValidation() {
-    int rowNum = 1000;
-
-    long allocStart = System.currentTimeMillis();
-    OffHeapRowBlock rowBlock = new OffHeapRowBlock(schema, 1024);
-    long allocEnd = System.currentTimeMillis();
-    explainRowBlockAllocation(rowBlock, allocStart, allocEnd);
-
-    OffHeapRowBlockReader reader = new OffHeapRowBlockReader(rowBlock);
-
-    ZeroCopyTuple tuple = new ZeroCopyTuple();
-    long writeStart = System.currentTimeMillis();
-    for (int i = 0; i < rowNum; i++) {
-      fillRow(i, rowBlock.getWriter());
-
-      reader.reset();
-      int j = 0;
-      while(reader.next(tuple)) {
-        validateTupleResult(j, tuple);
-
-        j++;
-      }
-    }
-    long writeEnd = System.currentTimeMillis();
-    LOG.info("writing and validating take " + (writeEnd - writeStart) + " msec");
-
-    long readStart = System.currentTimeMillis();
-    tuple = new ZeroCopyTuple();
-    int j = 0;
-    reader.reset();
-    while(reader.next(tuple)) {
-      validateTupleResult(j, tuple);
-      j++;
-    }
-    long readEnd = System.currentTimeMillis();
-    LOG.info("reading takes " + (readEnd - readStart) + " msec");
-
-    rowBlock.release();
-  }
-
-  @Test
-  public void testNullityValidation() {
-    int rowNum = 1000;
-
-    long allocStart = System.currentTimeMillis();
-    OffHeapRowBlock rowBlock = new OffHeapRowBlock(schema, 1024);
-    long allocEnd = System.currentTimeMillis();
-    explainRowBlockAllocation(rowBlock, allocStart, allocEnd);
-
-    OffHeapRowBlockReader reader = new OffHeapRowBlockReader(rowBlock);
-    ZeroCopyTuple tuple = new ZeroCopyTuple();
-    long writeStart = System.currentTimeMillis();
-    for (int i = 0; i < rowNum; i++) {
-      fillRowBlockWithNull(i, rowBlock.getWriter());
-
-      reader.reset();
-      int j = 0;
-      while(reader.next(tuple)) {
-        validateNullity(j, tuple);
-
-        j++;
-      }
-    }
-    long writeEnd = System.currentTimeMillis();
-    LOG.info("writing and nullity validating take " + (writeEnd - writeStart) +" msec");
-
-    long readStart = System.currentTimeMillis();
-    tuple = new ZeroCopyTuple();
-    int j = 0;
-    reader.reset();
-    while(reader.next(tuple)) {
-      validateNullity(j, tuple);
-
-      j++;
-    }
-    long readEnd = System.currentTimeMillis();
-    LOG.info("reading takes " + (readEnd - readStart) + " msec");
-
-    rowBlock.release();
-  }
-
-  @Test
-  public void testEmptyRow() {
-    int rowNum = 1000;
-
-    long allocStart = System.currentTimeMillis();
-    OffHeapRowBlock rowBlock = new OffHeapRowBlock(schema, StorageUnit.MB * 10);
-    long allocEnd = System.currentTimeMillis();
-    explainRowBlockAllocation(rowBlock, allocStart, allocEnd);
-
-    long writeStart = System.currentTimeMillis();
-    for (int i = 0; i < rowNum; i++) {
-      rowBlock.getWriter().startRow();
-      // empty columns
-      rowBlock.getWriter().endRow();
-    }
-    long writeEnd = System.currentTimeMillis();
-    LOG.info("writing tooks " + (writeEnd - writeStart) + " msec");
-
-    OffHeapRowBlockReader reader = new OffHeapRowBlockReader(rowBlock);
-
-    long readStart = System.currentTimeMillis();
-    ZeroCopyTuple tuple = new ZeroCopyTuple();
-    int j = 0;
-    reader.reset();
-    while(reader.next(tuple)) {
-      j++;
-    }
-    long readEnd = System.currentTimeMillis();
-    LOG.info("reading takes " + (readEnd - readStart) + " msec");
-    rowBlock.release();
-
-    assertEquals(rowNum, j);
-    assertEquals(rowNum, rowBlock.rows());
-  }
-
-  @Test
-  public void testSortBenchmark() {
-    int rowNum = 1000;
-
-    OffHeapRowBlock rowBlock = createRowBlock(rowNum);
-    OffHeapRowBlockReader reader = new OffHeapRowBlockReader(rowBlock);
-
-    List<ZeroCopyTuple> unSafeTuples = Lists.newArrayList();
-
-    long readStart = System.currentTimeMillis();
-    ZeroCopyTuple tuple = new ZeroCopyTuple();
-    reader.reset();
-    while(reader.next(tuple)) {
-      unSafeTuples.add(tuple);
-      tuple = new ZeroCopyTuple();
-    }
-    long readEnd = System.currentTimeMillis();
-    LOG.info("reading takes " + (readEnd - readStart) + " msec");
-
-    SortSpec sortSpec = new SortSpec(new Column("col2", Type.INT4));
-    BaseTupleComparator comparator = new BaseTupleComparator(schema, new SortSpec[] {sortSpec});
-
-    long sortStart = System.currentTimeMillis();
-    Collections.sort(unSafeTuples, comparator);
-    long sortEnd = System.currentTimeMillis();
-    LOG.info("sorting took " + (sortEnd - sortStart) + " msec");
-    rowBlock.release();
-  }
-
-  @Test
-  public void testVTuplePutAndGetBenchmark() {
-    int rowNum = 1000;
-
-    List<VTuple> rowBlock = Lists.newArrayList();
-    long writeStart = System.currentTimeMillis();
-    VTuple tuple;
-    for (int i = 0; i < rowNum; i++) {
-      tuple = new VTuple(schema.size());
-      fillVTuple(i, tuple);
-      rowBlock.add(tuple);
-    }
-    long writeEnd = System.currentTimeMillis();
-    LOG.info("Writing takes " + (writeEnd - writeStart) + " msec");
-
-    long readStart = System.currentTimeMillis();
-    int j = 0;
-    for (VTuple t : rowBlock) {
-      validateTupleResult(j, t);
-      j++;
-    }
-    long readEnd = System.currentTimeMillis();
-    LOG.info("reading takes " + (readEnd - readStart) + " msec");
-
-    int count = 0;
-    for (int l = 0; l < rowBlock.size(); l++) {
-      for(int m = 0; m < schema.size(); m++ ) {
-        if (rowBlock.get(l).contains(m) && rowBlock.get(l).get(m).type() == Type.INT4) {
-          count ++;
-        }
-      }
-    }
-    // Prevents the loop above from being eliminated as dead code.
-    LOG.info("The number of INT4 values is " + count + ".");
-  }
-
-  @Test
-  public void testVTuplePutAndGetBenchmarkViaDirectRowEncoder() {
-    int rowNum = 1000;
-
-    OffHeapRowBlock rowBlock = new OffHeapRowBlock(schema, StorageUnit.MB * 100);
-
-    long writeStart = System.currentTimeMillis();
-    VTuple tuple = new VTuple(schema.size());
-    for (int i = 0; i < rowNum; i++) {
-      fillVTuple(i, tuple);
-
-      RowStoreUtil.convert(tuple, rowBlock.getWriter());
-    }
-    long writeEnd = System.currentTimeMillis();
-    LOG.info("Writing takes " + (writeEnd - writeStart) + " msec");
-
-    validateResults(rowBlock);
-    rowBlock.release();
-  }
-
-  @Test
-  public void testSerDerOfRowBlock() {
-    int rowNum = 1000;
-
-    OffHeapRowBlock rowBlock = createRowBlock(rowNum);
-
-    ByteBuffer bb = rowBlock.nioBuffer();
-    OffHeapRowBlock restoredRowBlock = new OffHeapRowBlock(schema, bb);
-    validateResults(restoredRowBlock);
-    rowBlock.release();
-  }
-
-  @Test
-  public void testSerDerOfZeroCopyTuple() {
-    int rowNum = 1000;
-
-    OffHeapRowBlock rowBlock = createRowBlock(rowNum);
-
-    ByteBuffer bb = rowBlock.nioBuffer();
-    OffHeapRowBlock restoredRowBlock = new OffHeapRowBlock(schema, bb);
-    OffHeapRowBlockReader reader = new OffHeapRowBlockReader(restoredRowBlock);
-
-    long readStart = System.currentTimeMillis();
-    ZeroCopyTuple tuple = new ZeroCopyTuple();
-    ZeroCopyTuple copyTuple = new ZeroCopyTuple();
-    int j = 0;
-    reader.reset();
-    while(reader.next(tuple)) {
-      ByteBuffer copy = tuple.nioBuffer();
-      copyTuple.set(copy, SchemaUtil.toDataTypes(schema));
-
-      validateTupleResult(j, copyTuple);
-
-      j++;
-    }
-    long readEnd = System.currentTimeMillis();
-    LOG.info("reading takes " + (readEnd - readStart) + " msec");
-
-    rowBlock.release();
-  }
-
-  public static OffHeapRowBlock createRowBlock(int rowNum) {
-    long allocateStart = System.currentTimeMillis();
-    OffHeapRowBlock rowBlock = new OffHeapRowBlock(schema, StorageUnit.MB * 8);
-    long allocatedEnd = System.currentTimeMillis();
-    LOG.info(FileUtil.humanReadableByteCount(rowBlock.size(), true) + " bytes allocated in "
-        + (allocatedEnd - allocateStart) + " msec");
-
-    long writeStart = System.currentTimeMillis();
-    for (int i = 0; i < rowNum; i++) {
-      fillRow(i, rowBlock.getWriter());
-    }
-    long writeEnd = System.currentTimeMillis();
-    LOG.info("writing takes " + (writeEnd - writeStart) + " msec");
-
-    return rowBlock;
-  }
-
-  public static OffHeapRowBlock createRowBlockWithNull(int rowNum) {
-    long allocateStart = System.currentTimeMillis();
-    OffHeapRowBlock rowBlock = new OffHeapRowBlock(schema, StorageUnit.MB * 8);
-    long allocatedEnd = System.currentTimeMillis();
-    LOG.info(FileUtil.humanReadableByteCount(rowBlock.size(), true) + " bytes allocated in "
-        + (allocatedEnd - allocateStart) + " msec");
-
-    long writeStart = System.currentTimeMillis();
-    for (int i = 0; i < rowNum; i++) {
-      fillRowBlockWithNull(i, rowBlock.getWriter());
-    }
-    long writeEnd = System.currentTimeMillis();
-    LOG.info("writing and validating take " + (writeEnd - writeStart) + " msec");
-
-    return rowBlock;
-  }
-
-  public static void fillRow(int i, RowWriter builder) {
-    builder.startRow();
-    builder.putBool(i % 1 == 0);                // 0
-    builder.putInt2((short) 1);                 // 1
-    builder.putInt4(i);                         // 2
-    builder.putInt8(i);                         // 3
-    builder.putFloat4(i);                       // 4
-    builder.putFloat8(i);                       // 5
-    builder.putText((UNICODE_FIELD_PREFIX + i).getBytes());  // 6
-    builder.putTimestamp(DatumFactory.createTimestamp("2014-04-16 08:48:00").asInt8() + i); // 7
-    builder.putDate(DatumFactory.createDate("2014-04-16").asInt4() + i); // 8
-    builder.putTime(DatumFactory.createTime("08:48:00").asInt8() + i); // 9
-    builder.putInterval(DatumFactory.createInterval((i + 1) + " hours")); // 10
-    builder.putInet4(DatumFactory.createInet4("192.168.0.1").asInt4() + i); // 11
-    builder.putProtoDatum(new ProtobufDatum(ProtoUtil.convertString(i + ""))); // 12
-    builder.endRow();
-  }
-
-  public static void fillRowBlockWithNull(int i, RowWriter writer) {
-    writer.startRow();
-
-    if (i == 0) {
-      writer.skipField();
-    } else {
-      writer.putBool(i % 1 == 0);                // 0
-    }
-    if (i % 1 == 0) {
-      writer.skipField();
-    } else {
-      writer.putInt2((short) 1);                 // 1
-    }
-
-    if (i % 2 == 0) {
-      writer.skipField();
-    } else {
-      writer.putInt4(i);                         // 2
-    }
-
-    if (i % 3 == 0) {
-      writer.skipField();
-    } else {
-      writer.putInt8(i);                         // 3
-    }
-
-    if (i % 4 == 0) {
-      writer.skipField();
-    } else {
-      writer.putFloat4(i);                       // 4
-    }
-
-    if (i % 5 == 0) {
-      writer.skipField();
-    } else {
-      writer.putFloat8(i);                       // 5
-    }
-
-    if (i % 6 == 0) {
-      writer.skipField();
-    } else {
-      writer.putText((UNICODE_FIELD_PREFIX + i).getBytes());  // 6
-    }
-
-    if (i % 7 == 0) {
-      writer.skipField();
-    } else {
-      writer.putTimestamp(DatumFactory.createTimestamp("2014-04-16 08:48:00").asInt8() + i); // 7
-    }
-
-    if (i % 8 == 0) {
-      writer.skipField();
-    } else {
-      writer.putDate(DatumFactory.createDate("2014-04-16").asInt4() + i); // 8
-    }
-
-    if (i % 9 == 0) {
-      writer.skipField();
-    } else {
-      writer.putTime(DatumFactory.createTime("08:48:00").asInt8() + i); // 9
-    }
-
-    if (i % 10 == 0) {
-      writer.skipField();
-    } else {
-      writer.putInterval(DatumFactory.createInterval((i + 1) + " hours")); // 10
-    }
-
-    if (i % 11 == 0) {
-      writer.skipField();
-    } else {
-      writer.putInet4(DatumFactory.createInet4("192.168.0.1").asInt4() + i); // 11
-    }
-
-    if (i % 12 == 0) {
-      writer.skipField();
-    } else {
-      writer.putProtoDatum(new ProtobufDatum(ProtoUtil.convertString(i + ""))); // 12
-    }
-
-    writer.endRow();
-  }
-
-  public static void fillVTuple(int i, VTuple tuple) {
-    tuple.put(0, DatumFactory.createBool(i % 1 == 0));
-    tuple.put(1, DatumFactory.createInt2((short) 1));
-    tuple.put(2, DatumFactory.createInt4(i));
-    tuple.put(3, DatumFactory.createInt8(i));
-    tuple.put(4, DatumFactory.createFloat4(i));
-    tuple.put(5, DatumFactory.createFloat8(i));
-    tuple.put(6, DatumFactory.createText((UNICODE_FIELD_PREFIX + i).getBytes()));
-    tuple.put(7, DatumFactory.createTimestamp(DatumFactory.createTimestamp("2014-04-16 08:48:00").asInt8() + i)); // 7
-    tuple.put(8, DatumFactory.createDate(DatumFactory.createDate("2014-04-16").asInt4() + i)); // 8
-    tuple.put(9, DatumFactory.createTime(DatumFactory.createTime("08:48:00").asInt8() + i)); // 9
-    tuple.put(10, DatumFactory.createInterval((i + 1) + " hours")); // 10
-    tuple.put(11, DatumFactory.createInet4(DatumFactory.createInet4("192.168.0.1").asInt4() + i)); // 11
-    tuple.put(12, new ProtobufDatum(ProtoUtil.convertString(i + ""))); // 12;
-  }
-
-  public static void validateResults(OffHeapRowBlock rowBlock) {
-    long readStart = System.currentTimeMillis();
-    ZeroCopyTuple tuple = new ZeroCopyTuple();
-    int j = 0;
-    OffHeapRowBlockReader reader = new OffHeapRowBlockReader(rowBlock);
-    reader.reset();
-    while(reader.next(tuple)) {
-      validateTupleResult(j, tuple);
-      j++;
-    }
-    long readEnd = System.currentTimeMillis();
-    LOG.info("Reading takes " + (readEnd - readStart) + " msec");
-  }
-
-  public static void validateTupleResult(int j, Tuple t) {
-    assertTrue((j % 1 == 0) == t.getBool(0));
-    assertTrue(1 == t.getInt2(1));
-    assertEquals(j, t.getInt4(2));
-    assertEquals(j, t.getInt8(3));
-    assertTrue(j == t.getFloat4(4));
-    assertTrue(j == t.getFloat8(5));
-    assertEquals(new String(UNICODE_FIELD_PREFIX + j), t.getText(6));
-    assertEquals(DatumFactory.createTimestamp("2014-04-16 08:48:00").asInt8() + (long) j, t.getInt8(7));
-    assertEquals(DatumFactory.createDate("2014-04-16").asInt4() + j, t.getInt4(8));
-    assertEquals(DatumFactory.createTime("08:48:00").asInt8() + j, t.getInt8(9));
-    assertEquals(DatumFactory.createInterval((j + 1) + " hours"), t.getInterval(10));
-    assertEquals(DatumFactory.createInet4("192.168.0.1").asInt4() + j, t.getInt4(11));
-    assertEquals(new ProtobufDatum(ProtoUtil.convertString(j + "")), t.getProtobufDatum(12));
-  }
-
-  public static void validateNullity(int j, Tuple tuple) {
-    if (j == 0) {
-      assertTrue(tuple.isNull(0));
-    } else {
-      assertTrue((j % 1 == 0) == tuple.getBool(0));
-    }
-
-    if (j % 1 == 0) {
-      assertTrue(tuple.isNull(1));
-    } else {
-      assertTrue(1 == tuple.getInt2(1));
-    }
-
-    if (j % 2 == 0) {
-      assertTrue(tuple.isNull(2));
-    } else {
-      assertEquals(j, tuple.getInt4(2));
-    }
-
-    if (j % 3 == 0) {
-      assertTrue(tuple.isNull(3));
-    } else {
-      assertEquals(j, tuple.getInt8(3));
-    }
-
-    if (j % 4 == 0) {
-      assertTrue(tuple.isNull(4));
-    } else {
-      assertTrue(j == tuple.getFloat4(4));
-    }
-
-    if (j % 5 == 0) {
-      assertTrue(tuple.isNull(5));
-    } else {
-      assertTrue(j == tuple.getFloat8(5));
-    }
-
-    if (j % 6 == 0) {
-      assertTrue(tuple.isNull(6));
-    } else {
-      assertEquals(new String(UNICODE_FIELD_PREFIX + j), tuple.getText(6));
-    }
-
-    if (j % 7 == 0) {
-      assertTrue(tuple.isNull(7));
-    } else {
-      assertEquals(DatumFactory.createTimestamp("2014-04-16 08:48:00").asInt8() + (long) j, tuple.getInt8(7));
-    }
-
-    if (j % 8 == 0) {
-      assertTrue(tuple.isNull(8));
-    } else {
-      assertEquals(DatumFactory.createDate("2014-04-16").asInt4() + j, tuple.getInt4(8));
-    }
-
-    if (j % 9 == 0) {
-      assertTrue(tuple.isNull(9));
-    } else {
-      assertEquals(DatumFactory.createTime("08:48:00").asInt8() + j, tuple.getInt8(9));
-    }
-
-    if (j % 10 == 0) {
-      assertTrue(tuple.isNull(10));
-    } else {
-      assertEquals(DatumFactory.createInterval((j + 1) + " hours"), tuple.getInterval(10));
-    }
-
-    if (j % 11 == 0) {
-      assertTrue(tuple.isNull(11));
-    } else {
-      assertEquals(DatumFactory.createInet4("192.168.0.1").asInt4() + j, tuple.getInt4(11));
-    }
-
-    if (j % 12 == 0) {
-      assertTrue(tuple.isNull(12));
-    } else {
-      assertEquals(new ProtobufDatum(ProtoUtil.convertString(j + "")), tuple.getProtobufDatum(12));
-    }
-  }
-}
\ No newline at end of file
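
Stripped of timing and benchmark code, the row-block test above is a startRow/put*/endRow write loop paired with a ZeroCopyTuple read loop, with release() freeing the off-heap buffer at the end. A minimal sketch, assuming a hypothetical two-column (INT2, INT4) schema; put* calls must follow the schema's column order, and skipField() records a null instead of a value:

    import org.apache.tajo.catalog.Schema;
    import org.apache.tajo.tuple.offheap.OffHeapRowBlock;
    import org.apache.tajo.tuple.offheap.OffHeapRowBlockReader;
    import org.apache.tajo.tuple.offheap.RowWriter;
    import org.apache.tajo.tuple.offheap.ZeroCopyTuple;
    import org.apache.tajo.unit.StorageUnit;

    // Write rowNum rows into an off-heap block, then count them back.
    static int writeAndCount(Schema schema, int rowNum) {
      OffHeapRowBlock rowBlock = new OffHeapRowBlock(schema, StorageUnit.MB * 8);
      RowWriter writer = rowBlock.getWriter();
      for (int i = 0; i < rowNum; i++) {
        writer.startRow();
        writer.putInt2((short) 1);
        writer.putInt4(i);
        writer.endRow();
      }

      OffHeapRowBlockReader reader = new OffHeapRowBlockReader(rowBlock);
      ZeroCopyTuple tuple = new ZeroCopyTuple();
      int rows = 0;
      while (reader.next(tuple)) {
        rows++;
      }
      rowBlock.release();   // the off-heap buffer must be freed explicitly
      return rows;
    }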

http://git-wip-us.apache.org/repos/asf/tajo/blob/4561711f/tajo-storage/src/test/java/org/apache/tajo/tuple/offheap/TestResizableSpec.java
----------------------------------------------------------------------
diff --git a/tajo-storage/src/test/java/org/apache/tajo/tuple/offheap/TestResizableSpec.java b/tajo-storage/src/test/java/org/apache/tajo/tuple/offheap/TestResizableSpec.java
deleted file mode 100644
index 1eb9c17..0000000
--- a/tajo-storage/src/test/java/org/apache/tajo/tuple/offheap/TestResizableSpec.java
+++ /dev/null
@@ -1,59 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.tajo.tuple.offheap;
-
-import org.apache.tajo.unit.StorageUnit;
-import org.junit.Test;
-
-import static org.junit.Assert.*;
-
-public class TestResizableSpec {
-
-  @Test
-  public void testResizableLimit() {
-    ResizableLimitSpec limit = new ResizableLimitSpec(10 * StorageUnit.MB, 1000 * StorageUnit.MB, 0.1f, 1.0f);
-
-    long expectedMaxSize = (long) (1000 * StorageUnit.MB + (1000 * StorageUnit.MB * 0.1f));
-
-    assertTrue(limit.limit() == 1000 * StorageUnit.MB + (1000 * StorageUnit.MB * 0.1f));
-
-    assertEquals(20971520, limit.increasedSize(10 * StorageUnit.MB));
-
-    assertEquals(expectedMaxSize, limit.increasedSize(1600 * StorageUnit.MB));
-
-    assertEquals(0.98f, limit.remainRatio(980 * StorageUnit.MB), 0.1);
-
-    assertFalse(limit.canIncrease(limit.limit()));
-  }
-
-  @Test
-  public void testFixedLimit() {
-    FixedSizeLimitSpec limit = new FixedSizeLimitSpec(100 * StorageUnit.MB, 0.0f);
-
-    assertEquals(limit.limit(), 100 * StorageUnit.MB);
-
-    assertEquals(100 * StorageUnit.MB, limit.increasedSize(1000));
-
-    assertEquals(100 * StorageUnit.MB, limit.increasedSize(1600 * StorageUnit.MB));
-
-    assertTrue(0.98f == limit.remainRatio(98 * StorageUnit.MB));
-
-    assertFalse(limit.canIncrease(limit.limit()));
-  }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/tajo/blob/4561711f/tajo-storage/src/test/resources/dataset/TestDelimitedTextFile/testErrorTolerance1.json
----------------------------------------------------------------------
diff --git a/tajo-storage/src/test/resources/dataset/TestDelimitedTextFile/testErrorTolerance1.json b/tajo-storage/src/test/resources/dataset/TestDelimitedTextFile/testErrorTolerance1.json
deleted file mode 100644
index 739dfe7..0000000
--- a/tajo-storage/src/test/resources/dataset/TestDelimitedTextFile/testErrorTolerance1.json
+++ /dev/null
@@ -1,6 +0,0 @@
-{"col1": "true", "col2": "hyunsik", "col3": 17, "col4": 59, "col5": 23, "col6": 77.9, "col7": 271.9, "col8": "hyunsik", "col9": "hyunsik", "col10": "192.168.0.1"}
-{"col1": "true", "col2": "hyunsik", "col3": 17, "col4": 59, "col5": 23, "col6": 77.9, "col7": 271.9, "col8": "hyunsik", "col9": "hyunsik", "col10": "192.168.0.1"
-{"col1": "true", "col2": "hyunsik", "col3": 17, "col4": 59, "col5": 23, "col6": 77.9, "col7": 271.9, "col8": "hyunsik", "col9": "hyunsik", "col10": "192.168.0.1"}
-{"col1": "true", "col2": "hyunsik", "col3": 17, "col4": 59, "col5": 23, "col6": 77.9, "col7": 271.9, "col8": "hyunsik", "col9": "hyunsik", "col10": "192.168.0.1"
-{"col1": "true", "col2": "hyunsik", "col3": 17, "col4": 59, "col5": 23, "col6": 77.9, "col7": 271.9, "col8": "hyunsik", "col9": "hyunsik", "col10": "192.168.0.1"
-{"col1": "true", "col2": "hyunsik", "col3": 17, "col4": 59, "col5": 23, "col6": 77.9, "col7": 271.9, "col8": "hyunsik", "col9": "hyunsik", "col10": "192.168.0.1"}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/tajo/blob/4561711f/tajo-storage/src/test/resources/dataset/TestDelimitedTextFile/testErrorTolerance2.json
----------------------------------------------------------------------
diff --git a/tajo-storage/src/test/resources/dataset/TestDelimitedTextFile/testErrorTolerance2.json b/tajo-storage/src/test/resources/dataset/TestDelimitedTextFile/testErrorTolerance2.json
deleted file mode 100644
index 8256b72..0000000
--- a/tajo-storage/src/test/resources/dataset/TestDelimitedTextFile/testErrorTolerance2.json
+++ /dev/null
@@ -1,4 +0,0 @@
-{"col1": "true", "col2": "hyunsik", "col3": 17, "col4": 59, "col5": 23, "col6": 77.9, "col7": 271.9, "col8": "hyunsik", "col9": "hyunsik", "col10": "192.168.0.1"
-{"col1": "true", "col2": "hyunsik", "col3": 17, "col4": 59, "col5": 23, "col6": 77.9, "col7": 271.9, "col8": "hyunsik", "col9": "hyunsik", "col10": "192.168.0.1"}
-{"col1": "true", "col2": "hyunsik", "col3": 17, "col4": 59, "col5": 23, "col6": 77.9, "col7": 271.9, "col8": "hyunsik", "col9": "hyunsik", "col10": "192.168.0.1"}
-{"col1": "true", "col2": "hyunsik", "col3": 17, "col4": 59, "col5": 23, "col6": 77.9, "col7": 271.9, "col8": "hyunsik", "col9": "hyunsik", "col10": "192.168.0.1"}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/tajo/blob/4561711f/tajo-storage/src/test/resources/dataset/TestJsonSerDe/testVariousType.json
----------------------------------------------------------------------
diff --git a/tajo-storage/src/test/resources/dataset/TestJsonSerDe/testVariousType.json b/tajo-storage/src/test/resources/dataset/TestJsonSerDe/testVariousType.json
deleted file mode 100644
index 8ee3408..0000000
--- a/tajo-storage/src/test/resources/dataset/TestJsonSerDe/testVariousType.json
+++ /dev/null
@@ -1 +0,0 @@
-{"col1": "true", "col2": "hyunsik", "col3": 17, "col4": 59, "col5": 23, "col6": 77.9, "col7": 271.9, "col8": "hyunsik", "col9": "hyunsik", "col10": "192.168.0.1"}

http://git-wip-us.apache.org/repos/asf/tajo/blob/4561711f/tajo-storage/src/test/resources/dataset/testLineText.txt
----------------------------------------------------------------------
diff --git a/tajo-storage/src/test/resources/dataset/testLineText.txt b/tajo-storage/src/test/resources/dataset/testLineText.txt
deleted file mode 100644
index 7403c26..0000000
--- a/tajo-storage/src/test/resources/dataset/testLineText.txt
+++ /dev/null
@@ -1,2 +0,0 @@
-1|25|emiya muljomdao
-2|25|emiya muljomdao
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/tajo/blob/4561711f/tajo-storage/src/test/resources/dataset/testVariousTypes.avsc
----------------------------------------------------------------------
diff --git a/tajo-storage/src/test/resources/dataset/testVariousTypes.avsc b/tajo-storage/src/test/resources/dataset/testVariousTypes.avsc
deleted file mode 100644
index d4250a9..0000000
--- a/tajo-storage/src/test/resources/dataset/testVariousTypes.avsc
+++ /dev/null
@@ -1,20 +0,0 @@
-{
-  "type": "record",
-  "namespace": "org.apache.tajo",
-  "name": "testVariousTypes",
-  "fields": [
-    { "name": "col1", "type": "boolean" },
-    { "name": "col2", "type": "string" },
-    { "name": "col3", "type": "int" },
-    { "name": "col4", "type": "int" },
-    { "name": "col5", "type": "long" },
-    { "name": "col6", "type": "float" },
-    { "name": "col7", "type": "double" },
-    { "name": "col8", "type": "string" },
-    { "name": "col9", "type": "bytes" },
-    { "name": "col10", "type": "bytes" },
-    { "name": "col11", "type": "null" },
-    { "name": "col12", "type": "bytes" }
-  ]
-}
-

http://git-wip-us.apache.org/repos/asf/tajo/blob/4561711f/tajo-storage/src/test/resources/storage-default.xml
----------------------------------------------------------------------
diff --git a/tajo-storage/src/test/resources/storage-default.xml b/tajo-storage/src/test/resources/storage-default.xml
deleted file mode 100644
index f4c81c7..0000000
--- a/tajo-storage/src/test/resources/storage-default.xml
+++ /dev/null
@@ -1,168 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-
-<!--
-  Licensed to the Apache Software Foundation (ASF) under one
-  or more contributor license agreements.  See the NOTICE file
-  distributed with this work for additional information
-  regarding copyright ownership.  The ASF licenses this file
-  to you under the Apache License, Version 2.0 (the
-  "License"); you may not use this file except in compliance
-  with the License.  You may obtain a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
-  -->
-
-<configuration>
-  <property>
-    <name>fs.s3.impl</name>
-    <value>org.apache.tajo.storage.s3.SmallBlockS3FileSystem</value>
-  </property>
-
-  <!--- Registered Scanner Handler -->
-  <property>
-    <name>tajo.storage.scanner-handler</name>
-    <value>textfile,csv,json,raw,rcfile,row,parquet,sequencefile,avro</value>
-  </property>
-
-  <!--- Fragment Class Configurations -->
-  <property>
-    <name>tajo.storage.fragment.textfile.class</name>
-    <value>org.apache.tajo.storage.fragment.FileFragment</value>
-  </property>
-  <property>
-    <name>tajo.storage.fragment.csv.class</name>
-    <value>org.apache.tajo.storage.fragment.FileFragment</value>
-  </property>
-  <property>
-    <name>tajo.storage.fragment.json.class</name>
-    <value>org.apache.tajo.storage.fragment.FileFragment</value>
-  </property>
-  <property>
-    <name>tajo.storage.fragment.raw.class</name>
-    <value>org.apache.tajo.storage.fragment.FileFragment</value>
-  </property>
-  <property>
-    <name>tajo.storage.fragment.rcfile.class</name>
-    <value>org.apache.tajo.storage.fragment.FileFragment</value>
-  </property>
-  <property>
-    <name>tajo.storage.fragment.row.class</name>
-    <value>org.apache.tajo.storage.fragment.FileFragment</value>
-  </property>
-  <property>
-    <name>tajo.storage.fragment.parquet.class</name>
-    <value>org.apache.tajo.storage.fragment.FileFragment</value>
-  </property>
-  <property>
-    <name>tajo.storage.fragment.sequencefile.class</name>
-    <value>org.apache.tajo.storage.fragment.FileFragment</value>
-  </property>
-  <property>
-    <name>tajo.storage.fragment.avro.class</name>
-    <value>org.apache.tajo.storage.fragment.FileFragment</value>
-  </property>
-
-  <!--- Scanner Handler -->
-  <property>
-    <name>tajo.storage.scanner-handler.textfile.class</name>
-    <value>org.apache.tajo.storage.text.DelimitedTextFile$DelimitedTextFileScanner</value>
-  </property>
-
-  <property>
-    <name>tajo.storage.scanner-handler.csv.class</name>
-    <value>org.apache.tajo.storage.CSVFile$CSVScanner</value>
-  </property>
-
-  <property>
-    <name>tajo.storage.scanner-handler.json.class</name>
-    <value>org.apache.tajo.storage.text.DelimitedTextFile$DelimitedTextFileScanner</value>
-  </property>
-
-  <property>
-    <name>tajo.storage.scanner-handler.raw.class</name>
-    <value>org.apache.tajo.storage.RawFile$RawFileScanner</value>
-  </property>
-
-  <property>
-    <name>tajo.storage.scanner-handler.rcfile.class</name>
-    <value>org.apache.tajo.storage.rcfile.RCFile$RCFileScanner</value>
-  </property>
-
-  <property>
-    <name>tajo.storage.scanner-handler.rowfile.class</name>
-    <value>org.apache.tajo.storage.RowFile$RowFileScanner</value>
-  </property>
-
-  <property>
-    <name>tajo.storage.scanner-handler.parquet.class</name>
-    <value>org.apache.tajo.storage.parquet.ParquetScanner</value>
-  </property>
-
-  <property>
-    <name>tajo.storage.scanner-handler.sequencefile.class</name>
-    <value>org.apache.tajo.storage.sequencefile.SequenceFileScanner</value>
-  </property>
-
-  <property>
-    <name>tajo.storage.scanner-handler.avro.class</name>
-    <value>org.apache.tajo.storage.avro.AvroScanner</value>
-  </property>
-
-  <!--- Appender Handler -->
-  <property>
-    <name>tajo.storage.appender-handler</name>
-    <value>textfile,csv,raw,rcfile,row,parquet,sequencefile,avro</value>
-  </property>
-
-  <property>
-    <name>tajo.storage.appender-handler.textfile.class</name>
-    <value>org.apache.tajo.storage.text.DelimitedTextFile$DelimitedTextFileAppender</value>
-  </property>
-
-  <property>
-    <name>tajo.storage.appender-handler.csv.class</name>
-    <value>org.apache.tajo.storage.CSVFile$CSVAppender</value>
-  </property>
-
-  <property>
-    <name>tajo.storage.appender-handler.json.class</name>
-    <value>org.apache.tajo.storage.text.DelimitedTextFile$DelimitedTextFileAppender</value>
-  </property>
-
-  <property>
-    <name>tajo.storage.appender-handler.raw.class</name>
-    <value>org.apache.tajo.storage.RawFile$RawFileAppender</value>
-  </property>
-
-  <property>
-    <name>tajo.storage.appender-handler.rcfile.class</name>
-    <value>org.apache.tajo.storage.rcfile.RCFile$RCFileAppender</value>
-  </property>
-
-  <property>
-    <name>tajo.storage.appender-handler.rowfile.class</name>
-    <value>org.apache.tajo.storage.RowFile$RowFileAppender</value>
-  </property>
-
-  <property>
-    <name>tajo.storage.appender-handler.parquet.class</name>
-    <value>org.apache.tajo.storage.parquet.ParquetAppender</value>
-  </property>
-
-  <property>
-    <name>tajo.storage.appender-handler.sequencefile.class</name>
-    <value>org.apache.tajo.storage.sequencefile.SequenceFileAppender</value>
-  </property>
-
-  <property>
-    <name>tajo.storage.appender-handler.avro.class</name>
-    <value>org.apache.tajo.storage.avro.AvroAppender</value>
-  </property>
-</configuration>
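
The handler registry deleted above is an ordinary Hadoop configuration file: each tajo.storage.scanner-handler.<type>.class and tajo.storage.appender-handler.<type>.class key maps a store-type keyword to its implementation class. Below is a minimal sketch of resolving such a key through Hadoop's Configuration API, assuming the file is on the classpath; the lookup helper itself is hypothetical, not Tajo's actual loader.

  import org.apache.hadoop.conf.Configuration;

  public class ScannerHandlerLookup {
    // Resolve the scanner class registered for a store type, e.g. "csv" maps to
    // org.apache.tajo.storage.CSVFile$CSVScanner in the file removed above.
    public static Class<?> resolveScanner(String storeType) throws ClassNotFoundException {
      Configuration conf = new Configuration();
      conf.addResource("storage-default.xml"); // the registry file, when present on the classpath
      String className = conf.get("tajo.storage.scanner-handler." + storeType + ".class");
      return Class.forName(className);
    }
  }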

http://git-wip-us.apache.org/repos/asf/tajo/blob/4561711f/tajo-storage/tajo-storage-common/pom.xml
----------------------------------------------------------------------
diff --git a/tajo-storage/tajo-storage-common/pom.xml b/tajo-storage/tajo-storage-common/pom.xml
new file mode 100644
index 0000000..c600b4b
--- /dev/null
+++ b/tajo-storage/tajo-storage-common/pom.xml
@@ -0,0 +1,337 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+-->
+
+<project xmlns="http://maven.apache.org/POM/4.0.0"  xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+  <parent>
+    <artifactId>tajo-project</artifactId>
+    <groupId>org.apache.tajo</groupId>
+    <version>0.9.1-SNAPSHOT</version>
+    <relativePath>../../tajo-project</relativePath>
+  </parent>
+  <modelVersion>4.0.0</modelVersion>
+  <artifactId>tajo-storage-common</artifactId>
+  <packaging>jar</packaging>
+  <name>Tajo Storage Common</name>
+  <properties>
+    <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
+    <project.reporting.outputEncoding>UTF-8</project.reporting.outputEncoding>
+  </properties>
+
+  <repositories>
+    <repository>
+      <id>repository.jboss.org</id>
+      <url>https://repository.jboss.org/nexus/content/repositories/releases/
+      </url>
+      <snapshots>
+        <enabled>false</enabled>
+      </snapshots>
+    </repository>
+  </repositories>
+
+  <build>
+    <plugins>
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-compiler-plugin</artifactId>
+        <configuration>
+          <source>1.6</source>
+          <target>1.6</target>
+          <encoding>${project.build.sourceEncoding}</encoding>
+        </configuration>
+      </plugin>
+      <plugin>
+        <groupId>org.apache.rat</groupId>
+        <artifactId>apache-rat-plugin</artifactId>
+        <executions>
+          <execution>
+            <phase>verify</phase>
+            <goals>
+              <goal>check</goal>
+            </goals>
+          </execution>
+        </executions>
+      </plugin>
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-surefire-plugin</artifactId>
+        <configuration>
+          <systemProperties>
+            <tajo.test>TRUE</tajo.test>
+          </systemProperties>
+          <argLine>-Xms512m -Xmx1024m -XX:MaxPermSize=128m -Dfile.encoding=UTF-8</argLine>
+        </configuration>
+      </plugin>
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-jar-plugin</artifactId>
+        <version>2.4</version>
+        <executions>
+          <execution>
+            <goals>
+              <goal>test-jar</goal>
+            </goals>
+          </execution>
+        </executions>
+      </plugin>
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-antrun-plugin</artifactId>
+        <executions>
+          <execution>
+            <id>create-protobuf-generated-sources-directory</id>
+            <phase>initialize</phase>
+            <configuration>
+              <target>
+                <mkdir dir="target/generated-sources/proto" />
+              </target>
+            </configuration>
+            <goals>
+              <goal>run</goal>
+            </goals>
+          </execution>
+        </executions>
+      </plugin>
+      <plugin>
+        <groupId>org.codehaus.mojo</groupId>
+        <artifactId>exec-maven-plugin</artifactId>
+        <version>1.2</version>
+        <executions>
+          <execution>
+            <id>generate-sources</id>
+            <phase>generate-sources</phase>
+            <configuration>
+              <executable>protoc</executable>
+              <arguments>
+                <argument>-Isrc/main/proto/</argument>
+                <argument>--proto_path=../../tajo-common/src/main/proto</argument>
+                <argument>--proto_path=../../tajo-catalog/tajo-catalog-common/src/main/proto</argument>
+                <argument>--java_out=target/generated-sources/proto</argument>
+                <argument>src/main/proto/IndexProtos.proto</argument>
+              </arguments>
+            </configuration>
+            <goals>
+              <goal>exec</goal>
+            </goals>
+          </execution>
+        </executions>
+      </plugin>
+      <plugin>
+        <groupId>org.codehaus.mojo</groupId>
+        <artifactId>build-helper-maven-plugin</artifactId>
+        <version>1.5</version>
+        <executions>
+          <execution>
+            <id>add-source</id>
+            <phase>generate-sources</phase>
+            <goals>
+              <goal>add-source</goal>
+            </goals>
+            <configuration>
+              <sources>
+                <source>target/generated-sources/proto</source>
+              </sources>
+            </configuration>
+          </execution>
+        </executions>
+      </plugin>
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-pmd-plugin</artifactId>
+        <version>2.7.1</version>
+      </plugin>
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-surefire-report-plugin</artifactId>
+      </plugin>
+    </plugins>
+  </build>
+
+
+  <dependencies>
+    <dependency>
+      <groupId>org.apache.tajo</groupId>
+      <artifactId>tajo-common</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.tajo</groupId>
+      <artifactId>tajo-catalog-common</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.tajo</groupId>
+      <artifactId>tajo-plan</artifactId>
+    </dependency>
+
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-common</artifactId>
+      <scope>provided</scope>
+      <exclusions>
+        <exclusion>
+          <artifactId>zookeeper</artifactId>
+          <groupId>org.apache.zookeeper</groupId>
+        </exclusion>
+        <exclusion>
+          <artifactId>slf4j-api</artifactId>
+          <groupId>org.slf4j</groupId>
+        </exclusion>
+        <exclusion>
+          <artifactId>jersey-json</artifactId>
+          <groupId>com.sun.jersey</groupId>
+        </exclusion>
+      </exclusions>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-hdfs</artifactId>
+      <scope>provided</scope>
+      <exclusions>
+        <exclusion>
+          <groupId>commons-el</groupId>
+          <artifactId>commons-el</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>tomcat</groupId>
+          <artifactId>jasper-runtime</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>tomcat</groupId>
+          <artifactId>jasper-compiler</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.mortbay.jetty</groupId>
+          <artifactId>jsp-2.1-jetty</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>com.sun.jersey.jersey-test-framework</groupId>
+          <artifactId>jersey-test-framework-grizzly2</artifactId>
+        </exclusion>
+      </exclusions>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-minicluster</artifactId>
+      <scope>test</scope>
+      <exclusions>
+        <exclusion>
+          <groupId>commons-el</groupId>
+          <artifactId>commons-el</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>tomcat</groupId>
+          <artifactId>jasper-runtime</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>tomcat</groupId>
+          <artifactId>jasper-compiler</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.mortbay.jetty</groupId>
+          <artifactId>jsp-2.1-jetty</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>com.sun.jersey.jersey-test-framework</groupId>
+          <artifactId>jersey-test-framework-grizzly2</artifactId>
+        </exclusion>
+        <exclusion>
+          <artifactId>hadoop-yarn-server-tests</artifactId>
+          <groupId>org.apache.hadoop</groupId>
+        </exclusion>
+        <exclusion>
+          <artifactId>hadoop-mapreduce-client-jobclient</artifactId>
+          <groupId>org.apache.hadoop</groupId>
+        </exclusion>
+        <exclusion>
+          <artifactId>hadoop-mapreduce-client-app</artifactId>
+          <groupId>org.apache.hadoop</groupId>
+        </exclusion>
+        <exclusion>
+          <artifactId>hadoop-yarn-api</artifactId>
+          <groupId>org.apache.hadoop</groupId>
+        </exclusion>
+        <exclusion>
+          <artifactId>hadoop-mapreduce-client-hs</artifactId>
+          <groupId>org.apache.hadoop</groupId>
+        </exclusion>
+        <exclusion>
+          <artifactId>hadoop-mapreduce-client-core</artifactId>
+          <groupId>org.apache.hadoop</groupId>
+        </exclusion>
+      </exclusions>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-mapreduce-client-core</artifactId>
+      <version>${hadoop.version}</version>
+      <scope>provided</scope>
+    </dependency>
+    <dependency>
+      <groupId>com.google.protobuf</groupId>
+      <artifactId>protobuf-java</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>junit</groupId>
+      <artifactId>junit</artifactId>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>io.netty</groupId>
+      <artifactId>netty-buffer</artifactId>
+    </dependency>
+  </dependencies>
+
+  <profiles>
+    <profile>
+      <id>docs</id>
+      <activation>
+        <activeByDefault>false</activeByDefault>
+      </activation>
+      <build>
+        <plugins>
+          <plugin>
+            <groupId>org.apache.maven.plugins</groupId>
+            <artifactId>maven-javadoc-plugin</artifactId>
+            <executions>
+              <execution>
+                <!-- build javadoc jars per jar for publishing to maven -->
+                <id>module-javadocs</id>
+                <phase>package</phase>
+                <goals>
+                  <goal>jar</goal>
+                </goals>
+                <configuration>
+                  <destDir>${project.build.directory}</destDir>
+                </configuration>
+              </execution>
+            </executions>
+          </plugin>
+        </plugins>
+      </build>
+    </profile>
+  </profiles>
+
+  <reporting>
+    <plugins>
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-surefire-report-plugin</artifactId>
+        <version>2.15</version>
+      </plugin>
+    </plugins>
+  </reporting>
+</project>
\ No newline at end of file
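
The exec-maven-plugin execution in this new POM shells out to protoc during generate-sources, and build-helper-maven-plugin then registers target/generated-sources/proto as an extra source root so the generated classes compile with the module. Run from tajo-storage/tajo-storage-common with protoc on the PATH, the equivalent standalone command (arguments transcribed from the POM) would be:

  protoc -Isrc/main/proto/ \
    --proto_path=../../tajo-common/src/main/proto \
    --proto_path=../../tajo-catalog/tajo-catalog-common/src/main/proto \
    --java_out=target/generated-sources/proto \
    src/main/proto/IndexProtos.proto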

http://git-wip-us.apache.org/repos/asf/tajo/blob/4561711f/tajo-storage/tajo-storage-common/src/main/java/org/apache/tajo/storage/Appender.java
----------------------------------------------------------------------
diff --git a/tajo-storage/tajo-storage-common/src/main/java/org/apache/tajo/storage/Appender.java b/tajo-storage/tajo-storage-common/src/main/java/org/apache/tajo/storage/Appender.java
new file mode 100644
index 0000000..c5e96ac
--- /dev/null
+++ b/tajo-storage/tajo-storage-common/src/main/java/org/apache/tajo/storage/Appender.java
@@ -0,0 +1,41 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.tajo.storage;
+
+import org.apache.tajo.catalog.statistics.TableStats;
+
+import java.io.Closeable;
+import java.io.IOException;
+
+public interface Appender extends Closeable {
+
+  void init() throws IOException;                   // open the underlying output for writing
+
+  void addTuple(Tuple t) throws IOException;        // append a single tuple (row)
+
+  void flush() throws IOException;                  // force buffered tuples out to storage
+
+  long getEstimatedOutputSize() throws IOException; // estimated size of the written output, in bytes
+
+  void close() throws IOException;                  // finalize the output and release resources
+
+  void enableStats();                               // opt in to TableStats collection
+
+  TableStats getStats();                            // stats gathered while writing, if enabled
+}
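
Appender is the write-side counterpart of a scanner: implementations follow an init/addTuple/flush/close lifecycle. A minimal usage sketch, assuming a concrete Appender instance named appender obtained elsewhere (for example via the appender-handler registry shown earlier) and some Iterable<Tuple> named tuples; both names are illustrative:

  appender.enableStats();        // opt in to TableStats collection
  appender.init();               // open the underlying output
  for (Tuple t : tuples) {
    appender.addTuple(t);        // append one row
  }
  appender.flush();              // push buffered rows to storage
  appender.close();              // finalize the output
  TableStats stats = appender.getStats();  // available once writing is done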

http://git-wip-us.apache.org/repos/asf/tajo/blob/4561711f/tajo-storage/tajo-storage-common/src/main/java/org/apache/tajo/storage/BaseTupleComparator.java
----------------------------------------------------------------------
diff --git a/tajo-storage/tajo-storage-common/src/main/java/org/apache/tajo/storage/BaseTupleComparator.java b/tajo-storage/tajo-storage-common/src/main/java/org/apache/tajo/storage/BaseTupleComparator.java
new file mode 100644
index 0000000..b829f60
--- /dev/null
+++ b/tajo-storage/tajo-storage-common/src/main/java/org/apache/tajo/storage/BaseTupleComparator.java
@@ -0,0 +1,206 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.tajo.storage;
+
+import com.google.common.base.Objects;
+import com.google.common.base.Preconditions;
+import org.apache.tajo.catalog.Schema;
+import org.apache.tajo.catalog.SortSpec;
+import org.apache.tajo.common.ProtoObject;
+import org.apache.tajo.datum.Datum;
+
+import static org.apache.tajo.catalog.proto.CatalogProtos.TupleComparatorSpecProto;
+import static org.apache.tajo.index.IndexProtos.TupleComparatorProto;
+
+/**
+ * The Comparator class for Tuples
+ * 
+ * @see Tuple
+ */
+public class BaseTupleComparator extends TupleComparator implements ProtoObject<TupleComparatorProto> {
+  private final Schema schema;
+  private final SortSpec [] sortSpecs;
+  private final int[] sortKeyIds;
+  private final boolean[] asc;
+  // consulted in compare() to decide null ordering per sort key
+  private final boolean[] nullFirsts;
+
+  private Datum left;
+  private Datum right;
+  private int compVal;
+
+  /**
+   * @param schema The schema of input tuples
+   * @param sortKeys The sort specifications: key column, sort order, and null ordering
+   */
+  public BaseTupleComparator(Schema schema, SortSpec[] sortKeys) {
+    Preconditions.checkArgument(sortKeys.length > 0, 
+        "At least one sort key must be specified.");
+
+    this.schema = schema;
+    this.sortSpecs = sortKeys;
+    this.sortKeyIds = new int[sortKeys.length];
+    this.asc = new boolean[sortKeys.length];
+    this.nullFirsts = new boolean[sortKeys.length];
+    for (int i = 0; i < sortKeys.length; i++) {
+      if (sortKeys[i].getSortKey().hasQualifier()) {
+        this.sortKeyIds[i] = schema.getColumnId(sortKeys[i].getSortKey().getQualifiedName());
+      } else {
+        this.sortKeyIds[i] = schema.getColumnIdByName(sortKeys[i].getSortKey().getSimpleName());
+      }
+          
+      this.asc[i] = sortKeys[i].isAscending();
+      this.nullFirsts[i]= sortKeys[i].isNullFirst();
+    }
+  }
+
+  public BaseTupleComparator(TupleComparatorProto proto) {
+    this.schema = new Schema(proto.getSchema());
+
+    this.sortSpecs = new SortSpec[proto.getSortSpecsCount()];
+    for (int i = 0; i < proto.getSortSpecsCount(); i++) {
+      sortSpecs[i] = new SortSpec(proto.getSortSpecs(i));
+    }
+
+    this.sortKeyIds = new int[proto.getCompSpecsCount()];
+    this.asc = new boolean[proto.getCompSpecsCount()];
+    this.nullFirsts = new boolean[proto.getCompSpecsCount()];
+
+    for (int i = 0; i < proto.getCompSpecsCount(); i++) {
+      TupleComparatorSpecProto sortSpecProto = proto.getCompSpecs(i);
+      sortKeyIds[i] = sortSpecProto.getColumnId();
+      asc[i] = sortSpecProto.getAscending();
+      nullFirsts[i] = sortSpecProto.getNullFirst();
+    }
+  }
+
+  public Schema getSchema() {
+    return schema;
+  }
+
+  public SortSpec [] getSortSpecs() {
+    return sortSpecs;
+  }
+
+  public int [] getSortKeyIds() {
+    return sortKeyIds;
+  }
+
+  @Override
+  public boolean isAscendingFirstKey() {
+    return this.asc[0];
+  }
+
+  @Override
+  public int compare(Tuple tuple1, Tuple tuple2) {
+    for (int i = 0; i < sortKeyIds.length; i++) {
+      left = tuple1.get(sortKeyIds[i]);
+      right = tuple2.get(sortKeyIds[i]);
+
+      if (left.isNull() || right.isNull()) {
+        if (!left.equals(right)) {
+          if (left.isNull()) {
+            compVal = 1;
+          } else if (right.isNull()) {
+            compVal = -1;
+          }
+          if (nullFirsts[i]) {
+            if (compVal != 0) {
+              compVal *= -1;
+            }
+          }
+        } else {
+          compVal = 0;
+        }
+      } else {
+        if (asc[i]) {
+          compVal = left.compareTo(right);
+        } else {
+          compVal = right.compareTo(left);
+        }
+      }
+
+      if (compVal != 0) {
+        return compVal;
+      }
+    }
+    return 0;
+  }
+
+  @Override
+  public int hashCode() {
+    return java.util.Arrays.hashCode(sortKeyIds);  // value-based hash, consistent with equals()
+  }
+
+  @Override
+  public boolean equals(Object obj) {
+    if (obj instanceof BaseTupleComparator) {
+      BaseTupleComparator other = (BaseTupleComparator) obj;
+      if (sortKeyIds.length != other.sortKeyIds.length) {
+        return false;
+      }
+
+      for (int i = 0; i < sortKeyIds.length; i++) {
+        if (sortKeyIds[i] != other.sortKeyIds[i] ||
+            asc[i] != other.asc[i] ||
+            nullFirsts[i] != other.nullFirsts[i]) {
+          return false;
+        }
+      }
+
+      return true;
+    } else {
+      return false;
+    }
+  }
+
+  @Override
+  public TupleComparatorProto getProto() {
+    TupleComparatorProto.Builder builder = TupleComparatorProto.newBuilder();
+    builder.setSchema(schema.getProto());
+    for (int i = 0; i < sortSpecs.length; i++) {
+      builder.addSortSpecs(sortSpecs[i].getProto());
+    }
+
+    TupleComparatorSpecProto.Builder sortSpecBuilder;
+    for (int i = 0; i < sortKeyIds.length; i++) {
+      sortSpecBuilder = TupleComparatorSpecProto.newBuilder();
+      sortSpecBuilder.setColumnId(sortKeyIds[i]);
+      sortSpecBuilder.setAscending(asc[i]);
+      sortSpecBuilder.setNullFirst(nullFirsts[i]);
+      builder.addCompSpecs(sortSpecBuilder);
+    }
+
+    return builder.build();
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder();
+
+    String prefix = "";
+    for (int i = 0; i < sortKeyIds.length; i++) {
+      sb.append(prefix).append("SortKeyId=").append(sortKeyIds[i])
+        .append(",Asc=").append(asc[i])
+        .append(",NullFirst=").append(nullFirsts[i]);
+      prefix = " ,";
+    }
+    return sb.toString();
+  }
+}
\ No newline at end of file
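
BaseTupleComparator compares the configured sort keys in order, honoring each key's ascending/descending and null-first flags, and returns on the first non-zero result; by default nulls order after non-null values. A minimal usage sketch, assuming this branch's Schema, SortSpec, and VTuple APIs; the column names and sort flags are illustrative:

  Schema schema = new Schema();
  schema.addColumn(new Column("score", Type.INT4));
  schema.addColumn(new Column("name", Type.TEXT));

  // Sort by score descending (nulls last), then by name ascending.
  SortSpec[] sortSpecs = new SortSpec[] {
      new SortSpec(schema.getColumn("score"), false, false),
      new SortSpec(schema.getColumn("name"), true, false)
  };
  BaseTupleComparator comparator = new BaseTupleComparator(schema, sortSpecs);

  List<Tuple> tuples = new ArrayList<Tuple>();  // filled with VTuple rows matching the schema
  Collections.sort(tuples, comparator);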