You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@tajo.apache.org by ji...@apache.org on 2015/05/16 14:15:32 UTC

[01/10] tajo git commit: TAJO-1598: TableMeta should change equals mechanism.

Repository: tajo
Updated Branches:
  refs/heads/index_support 6f4409629 -> 2cbc1b9c7


TAJO-1598: TableMeta should change equals mechanism.

Closes #565

Signed-off-by: Jihoon Son <ji...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/tajo/repo
Commit: http://git-wip-us.apache.org/repos/asf/tajo/commit/6977471e
Tree: http://git-wip-us.apache.org/repos/asf/tajo/tree/6977471e
Diff: http://git-wip-us.apache.org/repos/asf/tajo/diff/6977471e

Branch: refs/heads/index_support
Commit: 6977471e486ec9f77c24e17b51eef71f99978f13
Parents: 53ed1c3
Author: DaeMyung Kang <ch...@naver.com>
Authored: Wed May 13 00:08:50 2015 +0900
Committer: Jihoon Son <ji...@apache.org>
Committed: Wed May 13 00:08:50 2015 +0900

----------------------------------------------------------------------
 CHANGES                                         |  3 +++
 .../java/org/apache/tajo/catalog/TableMeta.java |  6 +++--
 .../org/apache/tajo/catalog/TestTableMeta.java  | 27 ++++++++++++++++++++
 3 files changed, 34 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/tajo/blob/6977471e/CHANGES
----------------------------------------------------------------------
diff --git a/CHANGES b/CHANGES
index 73fd2f3..4912114 100644
--- a/CHANGES
+++ b/CHANGES
@@ -123,6 +123,9 @@ Release 0.11.0 - unreleased
 
   BUG FIXES
 
+    TAJO-1598: TableMeta should change equals mechanism.
+    (Contributed by DaeMyung Kang, Committed by jihoon)
+
     TAJO-1593: Add missing stop condition to Taskrunner. (jinho)
 
     TAJO-1556: "insert into select" with reordered column list does not work.

http://git-wip-us.apache.org/repos/asf/tajo/blob/6977471e/tajo-catalog/tajo-catalog-common/src/main/java/org/apache/tajo/catalog/TableMeta.java
----------------------------------------------------------------------
diff --git a/tajo-catalog/tajo-catalog-common/src/main/java/org/apache/tajo/catalog/TableMeta.java b/tajo-catalog/tajo-catalog-common/src/main/java/org/apache/tajo/catalog/TableMeta.java
index 59cba44..2b31b83 100644
--- a/tajo-catalog/tajo-catalog-common/src/main/java/org/apache/tajo/catalog/TableMeta.java
+++ b/tajo-catalog/tajo-catalog-common/src/main/java/org/apache/tajo/catalog/TableMeta.java
@@ -136,8 +136,10 @@ public class TableMeta implements ProtoObject<CatalogProtos.TableProto>, GsonObj
 	public boolean equals(Object object) {
 		if(object instanceof TableMeta) {
 			TableMeta other = (TableMeta) object;
-			
-			return this.getProto().equals(other.getProto());
+
+			boolean eq = this.getStoreType().equals(other.getStoreType());
+			eq = eq && this.getOptions().equals(other.getOptions());
+			return eq;
 		}
 		
 		return false;		

http://git-wip-us.apache.org/repos/asf/tajo/blob/6977471e/tajo-catalog/tajo-catalog-common/src/test/java/org/apache/tajo/catalog/TestTableMeta.java
----------------------------------------------------------------------
diff --git a/tajo-catalog/tajo-catalog-common/src/test/java/org/apache/tajo/catalog/TestTableMeta.java b/tajo-catalog/tajo-catalog-common/src/test/java/org/apache/tajo/catalog/TestTableMeta.java
index e1c0158..56c2ad2 100644
--- a/tajo-catalog/tajo-catalog-common/src/test/java/org/apache/tajo/catalog/TestTableMeta.java
+++ b/tajo-catalog/tajo-catalog-common/src/test/java/org/apache/tajo/catalog/TestTableMeta.java
@@ -22,6 +22,7 @@ import org.apache.tajo.catalog.json.CatalogGsonHelper;
 import org.apache.tajo.catalog.proto.CatalogProtos.StoreType;
 import org.apache.tajo.catalog.proto.CatalogProtos.TableProto;
 import org.apache.tajo.common.TajoDataTypes.Type;
+import org.apache.tajo.rpc.protocolrecords.PrimitiveProtos;
 import org.junit.Before;
 import org.junit.Test;
 
@@ -86,6 +87,32 @@ public class TestTableMeta {
     assertTrue(meta.equals(meta2));
     assertNotSame(meta, meta2);
   }
+
+	@Test
+	public void testEqualsObject2() {
+		//This test case should insert more than 2 items into one slot.
+		//HashMap's default slot count is 16,
+		//so MAX_COUNT is 17.
+
+		int MAX_COUNT = 17;
+
+		TableMeta meta1 = CatalogUtil.newTableMeta(StoreType.CSV.toString());
+		for (int i = 0; i < MAX_COUNT; i++) {
+			meta1.putOption("key"+i, "value"+i);
+		}
+
+		PrimitiveProtos.KeyValueSetProto.Builder optionBuilder = PrimitiveProtos.KeyValueSetProto.newBuilder();
+		for (int i = 1; i <= MAX_COUNT; i++) {
+			PrimitiveProtos.KeyValueProto.Builder keyValueBuilder = PrimitiveProtos.KeyValueProto.newBuilder();
+			keyValueBuilder.setKey("key"+(MAX_COUNT-i)).setValue("value"+(MAX_COUNT-i));
+			optionBuilder.addKeyval(keyValueBuilder);
+		}
+		TableProto.Builder builder = TableProto.newBuilder();
+		builder.setStoreType(StoreType.CSV.toString());
+		builder.setParams(optionBuilder);
+		TableMeta meta2 = new TableMeta(builder.build());
+		assertTrue(meta1.equals(meta2));
+	}
   
   @Test
   public void testGetProto() {


[10/10] tajo git commit: Merge branch 'master' of https://git-wip-us.apache.org/repos/asf/tajo into index_support

Posted by ji...@apache.org.
Merge branch 'master' of https://git-wip-us.apache.org/repos/asf/tajo into index_support


Project: http://git-wip-us.apache.org/repos/asf/tajo/repo
Commit: http://git-wip-us.apache.org/repos/asf/tajo/commit/2cbc1b9c
Tree: http://git-wip-us.apache.org/repos/asf/tajo/tree/2cbc1b9c
Diff: http://git-wip-us.apache.org/repos/asf/tajo/diff/2cbc1b9c

Branch: refs/heads/index_support
Commit: 2cbc1b9c7dc1ec04c4f6c8d7c8f746225d02421d
Parents: 6f44096 5491f0e
Author: Jihoon Son <ji...@apache.org>
Authored: Sat May 16 21:05:31 2015 +0900
Committer: Jihoon Son <ji...@apache.org>
Committed: Sat May 16 21:05:31 2015 +0900

----------------------------------------------------------------------
 CHANGES                                         |  15 +
 .../java/org/apache/tajo/catalog/Schema.java    |  16 +
 .../java/org/apache/tajo/catalog/TableMeta.java |   6 +-
 .../org/apache/tajo/catalog/TestTableMeta.java  |  27 +
 .../org/apache/tajo/cli/tools/TajoAdmin.java    |   6 +-
 .../org/apache/tajo/cli/tools/TajoHAAdmin.java  |   9 +-
 .../cli/tsql/commands/DescTableCommand.java     |   1 +
 .../apache/tajo/client/DummyServiceTracker.java |  19 +-
 .../org/apache/tajo/storage/RowStoreUtil.java   |  24 +-
 .../java/org/apache/tajo/conf/TajoConf.java     |   2 +
 .../ValueTooLongForTypeCharactersException.java |  27 +
 .../java/org/apache/tajo/ha/HAConstants.java    |   1 +
 .../java/org/apache/tajo/ha/HAServiceUtil.java  | 253 --------
 .../apache/tajo/service/BaseServiceTracker.java |  31 +-
 .../apache/tajo/service/HAServiceTracker.java   |  25 +-
 .../org/apache/tajo/service/ServiceTracker.java |  28 +-
 .../org/apache/tajo/storage/EmptyTuple.java     | 140 +---
 .../java/org/apache/tajo/storage/NullTuple.java | 175 +++++
 .../java/org/apache/tajo/storage/VTuple.java    |  20 +-
 .../engine/planner/PhysicalPlannerImpl.java     |  25 +-
 .../planner/physical/BSTIndexScanExec.java      |   4 +-
 .../physical/BasicPhysicalExecutorVisitor.java  |   8 -
 .../planner/physical/ColPartitionStoreExec.java |   7 +-
 .../planner/physical/CommonHashJoinExec.java    | 191 ++++++
 .../engine/planner/physical/CommonJoinExec.java | 172 ++++-
 .../planner/physical/HashFullOuterJoinExec.java | 247 +++----
 .../engine/planner/physical/HashJoinExec.java   | 212 +-----
 .../planner/physical/HashLeftAntiJoinExec.java  |  59 +-
 .../planner/physical/HashLeftOuterJoinExec.java | 292 +--------
 .../planner/physical/HashLeftSemiJoinExec.java  |  48 +-
 .../planner/physical/NLLeftOuterJoinExec.java   | 101 ---
 .../physical/PhysicalExecutorVisitor.java       |   3 -
 .../physical/RangeShuffleFileWriteExec.java     |   3 +-
 .../physical/RightOuterMergeJoinExec.java       |  40 +-
 .../engine/planner/physical/SeqScanExec.java    |   2 +-
 .../engine/planner/physical/StoreTableExec.java |   9 +-
 .../apache/tajo/engine/utils/CacheHolder.java   |   3 +-
 .../org/apache/tajo/ha/HdfsServiceTracker.java  | 322 +++++----
 .../org/apache/tajo/master/GlobalEngine.java    |   4 +-
 .../java/org/apache/tajo/master/TajoMaster.java |  24 +-
 .../apache/tajo/master/exec/DDLExecutor.java    |   5 +-
 .../exec/NonForwardQueryResultFileScanner.java  |   7 +-
 .../apache/tajo/master/exec/QueryExecutor.java  |   4 +-
 .../java/org/apache/tajo/querymaster/Query.java |   7 +-
 .../tajo/querymaster/QueryMasterTask.java       |   4 +-
 .../apache/tajo/querymaster/Repartitioner.java  |  24 +-
 .../java/org/apache/tajo/querymaster/Stage.java |   6 +-
 .../main/java/org/apache/tajo/util/JSPUtil.java |   2 +-
 .../java/org/apache/tajo/worker/TajoWorker.java |   6 +-
 .../main/java/org/apache/tajo/worker/Task.java  |   3 +-
 .../tajo/worker/WorkerHeartbeatService.java     |   2 +-
 .../resources/webapps/admin/catalogview.jsp     |  11 +-
 .../main/resources/webapps/admin/cluster.jsp    |  10 +-
 .../src/main/resources/webapps/admin/index.jsp  |  10 +-
 .../src/main/resources/webapps/admin/query.jsp  |   7 +-
 .../resources/webapps/admin/query_executor.jsp  |   9 +-
 .../org/apache/tajo/BackendTestingUtil.java     |   3 +-
 .../planner/global/TestBroadcastJoinPlan.java   |   3 +-
 .../planner/physical/TestBNLJoinExec.java       |   5 +-
 .../planner/physical/TestExternalSortExec.java  |   3 +-
 .../physical/TestFullOuterHashJoinExec.java     |   9 +-
 .../physical/TestFullOuterMergeJoinExec.java    |  10 +-
 .../planner/physical/TestHashAntiJoinExec.java  |   5 +-
 .../planner/physical/TestHashJoinExec.java      |   5 +-
 .../planner/physical/TestHashSemiJoinExec.java  |  13 +-
 .../physical/TestLeftOuterHashJoinExec.java     | 113 ++--
 .../physical/TestLeftOuterNLJoinExec.java       | 474 --------------
 .../planner/physical/TestMergeJoinExec.java     |   5 +-
 .../engine/planner/physical/TestNLJoinExec.java |   5 +-
 .../planner/physical/TestPhysicalPlanner.java   |  11 +-
 .../physical/TestProgressExternalSortExec.java  |   3 +-
 .../physical/TestRightOuterHashJoinExec.java    |   7 +-
 .../physical/TestRightOuterMergeJoinExec.java   |  11 +-
 .../engine/planner/physical/TestSortExec.java   |   5 +-
 .../tajo/engine/query/TestHBaseTable.java       |  12 +-
 .../tajo/engine/query/TestInsertQuery.java      |  45 +-
 .../tajo/engine/query/TestJoinBroadcast.java    |   2 +-
 .../apache/tajo/engine/util/TestTupleUtil.java  |  18 +
 .../apache/tajo/ha/TestHAServiceHDFSImpl.java   |  28 +-
 .../org/apache/tajo/jdbc/TestResultSet.java     |   2 +-
 .../tajo/master/TestExecutionBlockCursor.java   |   4 +-
 .../org/apache/tajo/storage/TestRowFile.java    |   5 +-
 .../queries/TestInsertQuery/test1_ddl.sql       |   1 +
 .../TestInsertQuery/test1_nolength_ddl.sql      |   1 +
 .../testInsertIntoSelectWithFixedSizeChar.sql   |   4 +
 ...tIntoSelectWithFixedSizeCharWithNoLength.sql |   2 +
 .../testJoinFilterOfRowPreservedTable1.sql      |   2 +-
 .../testJoinFilterOfRowPreservedTable1.result   |   2 +-
 .../org/apache/tajo/plan/LogicalPlanner.java    |   4 +
 .../plan/expr/AggregationFunctionCallEval.java  |   4 +-
 .../apache/tajo/plan/expr/AlgebraicUtil.java    |   5 +
 .../org/apache/tajo/plan/expr/EvalNode.java     |  39 +-
 .../java/org/apache/tajo/plan/expr/InEval.java  |   2 +-
 .../plan/expr/PatternMatchPredicateEval.java    |   2 +-
 .../tajo/plan/expr/WindowFunctionEval.java      |   2 +-
 .../stream/TextFieldSerializerDeserializer.java |   8 +-
 .../storage/BinarySerializerDeserializer.java   |  10 +
 .../org/apache/tajo/storage/FrameTuple.java     |  14 +-
 .../org/apache/tajo/storage/MergeScanner.java   |   2 +-
 .../org/apache/tajo/storage/RowStoreUtil.java   |  20 +-
 .../org/apache/tajo/storage/StorageManager.java | 645 +------------------
 .../org/apache/tajo/storage/TableSpace.java     |  74 +++
 .../apache/tajo/storage/TableSpaceManager.java  | 254 ++++++++
 .../storage/TextSerializerDeserializer.java     |  10 +-
 .../tajo/storage/hbase/HBasePutAppender.java    |   4 +-
 .../apache/tajo/storage/hbase/HBaseScanner.java |   7 +-
 .../tajo/storage/hbase/HBaseStorageManager.java |  40 +-
 .../storage/hbase/TestHBaseStorageManager.java  |   5 +-
 .../org/apache/tajo/storage/FileAppender.java   |   2 +-
 .../apache/tajo/storage/FileStorageManager.java | 377 ++++++++++-
 .../storage/HashShuffleAppenderManager.java     |   4 +-
 .../tajo/storage/parquet/TajoWriteSupport.java  |   7 +
 .../text/TextFieldSerializerDeserializer.java   |   8 +-
 .../tajo/storage/TestCompressionStorages.java   |   5 +-
 .../tajo/storage/TestDelimitedTextFile.java     |   9 +-
 .../tajo/storage/TestFileStorageManager.java    |  11 +-
 .../apache/tajo/storage/TestFileSystems.java    |   3 +-
 .../org/apache/tajo/storage/TestLineReader.java |   9 +-
 .../apache/tajo/storage/TestMergeScanner.java   |   7 +-
 .../org/apache/tajo/storage/TestStorages.java   |  94 ++-
 .../apache/tajo/storage/index/TestBSTIndex.java |  53 +-
 .../index/TestSingleCSVFileBSTIndex.java        |   5 +-
 .../apache/tajo/storage/json/TestJsonSerDe.java |   8 +-
 123 files changed, 2389 insertions(+), 2900 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/tajo/blob/2cbc1b9c/CHANGES
----------------------------------------------------------------------
diff --cc CHANGES
index 42d9b36,21f5e5a..4372e04
--- a/CHANGES
+++ b/CHANGES
@@@ -123,8 -128,16 +128,18 @@@ Release 0.11.0 - unrelease
  
    BUG FIXES
  
 +    TAJO-1594: Catalog schema is invalid for some databases. (jihoon)
 +
+     TAJO-1605: Fix master build failure on jdk 1.6. (jinho)
+ 
+     TAJO-1485: Datum 'Char' returned only 1byte.
+     (Contributed by DaeMyung Kang, Committed by jihoon)
+ 
+     TAJO-1586: TajoMaster HA startup failure on Yarn. (jaehwa)
+ 
+     TAJO-1598: TableMeta should change equals mechanism.
+     (Contributed by DaeMyung Kang, Committed by jihoon)
+ 
      TAJO-1593: Add missing stop condition to Taskrunner. (jinho)
  
      TAJO-1556: "insert into select" with reordered column list does not work.

http://git-wip-us.apache.org/repos/asf/tajo/blob/2cbc1b9c/tajo-cli/src/main/java/org/apache/tajo/cli/tsql/commands/DescTableCommand.java
----------------------------------------------------------------------
diff --cc tajo-cli/src/main/java/org/apache/tajo/cli/tsql/commands/DescTableCommand.java
index 31dfb13,6df26b7..3341c68
--- a/tajo-cli/src/main/java/org/apache/tajo/cli/tsql/commands/DescTableCommand.java
+++ b/tajo-cli/src/main/java/org/apache/tajo/cli/tsql/commands/DescTableCommand.java
@@@ -21,6 -21,6 +21,7 @@@ package org.apache.tajo.cli.tsql.comman
  import org.apache.commons.lang.CharUtils;
  import org.apache.commons.lang.StringEscapeUtils;
  import org.apache.tajo.TajoConstants;
++import org.apache.tajo.catalog.CatalogUtil;
  import org.apache.tajo.catalog.Column;
  import org.apache.tajo.catalog.TableDesc;
  import org.apache.tajo.catalog.partition.PartitionMethodDesc;

http://git-wip-us.apache.org/repos/asf/tajo/blob/2cbc1b9c/tajo-common/src/main/java/org/apache/tajo/conf/TajoConf.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/tajo/blob/2cbc1b9c/tajo-core/src/main/java/org/apache/tajo/engine/planner/PhysicalPlannerImpl.java
----------------------------------------------------------------------
diff --cc tajo-core/src/main/java/org/apache/tajo/engine/planner/PhysicalPlannerImpl.java
index 2dd7fa8,ff42d5f..04315eb
--- a/tajo-core/src/main/java/org/apache/tajo/engine/planner/PhysicalPlannerImpl.java
+++ b/tajo-core/src/main/java/org/apache/tajo/engine/planner/PhysicalPlannerImpl.java
@@@ -46,11 -47,8 +46,12 @@@ import org.apache.tajo.ipc.TajoWorkerPr
  import org.apache.tajo.ipc.TajoWorkerProtocol.DistinctGroupbyEnforcer.SortSpecArray;
  import org.apache.tajo.plan.LogicalPlan;
  import org.apache.tajo.plan.logical.*;
 +import org.apache.tajo.plan.serder.LogicalNodeDeserializer;
  import org.apache.tajo.plan.util.PlannerUtil;
 -import org.apache.tajo.storage.*;
 +import org.apache.tajo.storage.FileStorageManager;
 +import org.apache.tajo.storage.StorageConstants;
 +import org.apache.tajo.storage.StorageManager;
++import org.apache.tajo.storage.TableSpaceManager;
  import org.apache.tajo.storage.fragment.FileFragment;
  import org.apache.tajo.storage.fragment.Fragment;
  import org.apache.tajo.storage.fragment.FragmentConvertor;

http://git-wip-us.apache.org/repos/asf/tajo/blob/2cbc1b9c/tajo-core/src/main/java/org/apache/tajo/engine/planner/physical/BSTIndexScanExec.java
----------------------------------------------------------------------
diff --cc tajo-core/src/main/java/org/apache/tajo/engine/planner/physical/BSTIndexScanExec.java
index 211d438,4612d45..7df48a4
--- a/tajo-core/src/main/java/org/apache/tajo/engine/planner/physical/BSTIndexScanExec.java
+++ b/tajo-core/src/main/java/org/apache/tajo/engine/planner/physical/BSTIndexScanExec.java
@@@ -68,21 -54,10 +68,21 @@@ public class BSTIndexScanExec extends P
      super(context, scanNode.getInSchema(), scanNode.getOutSchema());
      this.scanNode = scanNode;
      this.qual = scanNode.getQual();
 -    this.datum = datum;
 +
 +    SortSpec[] keySortSpecs = new SortSpec[predicates.length];
 +    values = new Datum[predicates.length];
 +    for (int i = 0; i < predicates.length; i++) {
 +      keySortSpecs[i] = predicates[i].getKeySortSpec();
 +      values[i] = predicates[i].getValue();
 +    }
 +
 +    TupleComparator comparator = new BaseTupleComparator(keySchema,
 +        keySortSpecs);
 +
 +    Schema fileScanOutSchema = mergeSubSchemas(inSchema, keySchema, scanNode.getTargets(), qual);
  
-     this.fileScanner = StorageManager.getSeekableScanner(context.getConf(),
+     this.fileScanner = TableSpaceManager.getSeekableScanner(context.getConf(),
 -        scanNode.getTableDesc().getMeta(), scanNode.getInSchema(), fragment, outSchema);
 +        scanNode.getTableDesc().getMeta(), inSchema, fragment, fileScanOutSchema);
      this.fileScanner.init();
      this.projector = new Projector(context, inSchema, outSchema, scanNode.getTargets());
  
@@@ -92,23 -67,6 +92,23 @@@
      this.reader.open();
    }
  
 +  private static Schema mergeSubSchemas(Schema originalSchema, Schema subSchema, Target[] targets, EvalNode qual) {
 +    Schema mergedSchema = new Schema();
 +    Set<Column> qualAndTargets = TUtil.newHashSet();
 +    qualAndTargets.addAll(EvalTreeUtil.findUniqueColumns(qual));
 +    for (Target target : targets) {
 +      qualAndTargets.addAll(EvalTreeUtil.findUniqueColumns(target.getEvalTree()));
 +    }
-     for (Column column : originalSchema.getColumns()) {
++    for (Column column : originalSchema.getRootColumns()) {
 +      if (subSchema.contains(column)
 +          || qualAndTargets.contains(column)
 +          || qualAndTargets.contains(column)) {
 +        mergedSchema.addColumn(column);
 +      }
 +    }
 +    return mergedSchema;
 +  }
 +
    @Override
    public void init() throws IOException {
      super.init();

http://git-wip-us.apache.org/repos/asf/tajo/blob/2cbc1b9c/tajo-core/src/main/java/org/apache/tajo/master/GlobalEngine.java
----------------------------------------------------------------------
diff --cc tajo-core/src/main/java/org/apache/tajo/master/GlobalEngine.java
index 991cb25,aeb4166..fc9212c
--- a/tajo-core/src/main/java/org/apache/tajo/master/GlobalEngine.java
+++ b/tajo-core/src/main/java/org/apache/tajo/master/GlobalEngine.java
@@@ -55,8 -54,8 +54,9 @@@ import org.apache.tajo.plan.verifier.Pr
  import org.apache.tajo.plan.verifier.VerificationState;
  import org.apache.tajo.plan.verifier.VerifyException;
  import org.apache.tajo.storage.StorageManager;
+ import org.apache.tajo.storage.TableSpaceManager;
  import org.apache.tajo.util.CommonTestingUtil;
 +import org.apache.tajo.util.IPCUtil;
  
  import java.io.IOException;
  import java.sql.SQLException;

http://git-wip-us.apache.org/repos/asf/tajo/blob/2cbc1b9c/tajo-core/src/main/java/org/apache/tajo/master/exec/DDLExecutor.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/tajo/blob/2cbc1b9c/tajo-core/src/main/java/org/apache/tajo/master/exec/QueryExecutor.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/tajo/blob/2cbc1b9c/tajo-core/src/main/java/org/apache/tajo/querymaster/Query.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/tajo/blob/2cbc1b9c/tajo-core/src/main/java/org/apache/tajo/querymaster/QueryMasterTask.java
----------------------------------------------------------------------
diff --cc tajo-core/src/main/java/org/apache/tajo/querymaster/QueryMasterTask.java
index 12f4b79,d77001c..640ec60
--- a/tajo-core/src/main/java/org/apache/tajo/querymaster/QueryMasterTask.java
+++ b/tajo-core/src/main/java/org/apache/tajo/querymaster/QueryMasterTask.java
@@@ -36,14 -36,7 +36,13 @@@ import org.apache.tajo.algebra.Expr
  import org.apache.tajo.algebra.JsonHelper;
  import org.apache.tajo.catalog.CatalogService;
  import org.apache.tajo.catalog.TableDesc;
- import org.apache.tajo.catalog.proto.CatalogProtos.StoreType;
  import org.apache.tajo.conf.TajoConf;
 +import org.apache.tajo.plan.LogicalOptimizer;
 +import org.apache.tajo.plan.LogicalPlan;
 +import org.apache.tajo.plan.LogicalPlanner;
 +import org.apache.tajo.plan.logical.LogicalRootNode;
 +import org.apache.tajo.plan.rewrite.LogicalPlanRewriteRule;
 +import org.apache.tajo.plan.util.PlannerUtil;
  import org.apache.tajo.engine.planner.global.MasterPlan;
  import org.apache.tajo.engine.query.QueryContext;
  import org.apache.tajo.exception.UnimplementedException;

http://git-wip-us.apache.org/repos/asf/tajo/blob/2cbc1b9c/tajo-core/src/main/java/org/apache/tajo/worker/Task.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/tajo/blob/2cbc1b9c/tajo-core/src/test/java/org/apache/tajo/engine/planner/global/TestBroadcastJoinPlan.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/tajo/blob/2cbc1b9c/tajo-core/src/test/java/org/apache/tajo/engine/planner/physical/TestHashAntiJoinExec.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/tajo/blob/2cbc1b9c/tajo-core/src/test/java/org/apache/tajo/engine/planner/physical/TestHashSemiJoinExec.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/tajo/blob/2cbc1b9c/tajo-core/src/test/java/org/apache/tajo/engine/planner/physical/TestPhysicalPlanner.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/tajo/blob/2cbc1b9c/tajo-core/src/test/java/org/apache/tajo/engine/planner/physical/TestSortExec.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/tajo/blob/2cbc1b9c/tajo-core/src/test/java/org/apache/tajo/master/TestExecutionBlockCursor.java
----------------------------------------------------------------------
diff --cc tajo-core/src/test/java/org/apache/tajo/master/TestExecutionBlockCursor.java
index ab94aac,5efdede..35cd612
--- a/tajo-core/src/test/java/org/apache/tajo/master/TestExecutionBlockCursor.java
+++ b/tajo-core/src/test/java/org/apache/tajo/master/TestExecutionBlockCursor.java
@@@ -80,9 -80,9 +80,9 @@@ public class TestExecutionBlockCursor 
  
      analyzer = new SQLAnalyzer();
      logicalPlanner = new LogicalPlanner(catalog);
 -    optimizer = new LogicalOptimizer(conf);
 +    optimizer = new LogicalOptimizer(conf, catalog);
  
-     StorageManager sm  = StorageManager.getFileStorageManager(conf);
+     StorageManager sm  = TableSpaceManager.getFileStorageManager(conf);
      dispatcher = new AsyncDispatcher();
      dispatcher.init(conf);
      dispatcher.start();

http://git-wip-us.apache.org/repos/asf/tajo/blob/2cbc1b9c/tajo-plan/src/main/java/org/apache/tajo/plan/LogicalPlanner.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/tajo/blob/2cbc1b9c/tajo-plan/src/main/java/org/apache/tajo/plan/expr/AlgebraicUtil.java
----------------------------------------------------------------------
diff --cc tajo-plan/src/main/java/org/apache/tajo/plan/expr/AlgebraicUtil.java
index ed6a570,c6b7354..7517e3a
--- a/tajo-plan/src/main/java/org/apache/tajo/plan/expr/AlgebraicUtil.java
+++ b/tajo-plan/src/main/java/org/apache/tajo/plan/expr/AlgebraicUtil.java
@@@ -18,12 -18,10 +18,13 @@@
  
  package org.apache.tajo.plan.expr;
  
 +import org.apache.tajo.algebra.*;
  import org.apache.tajo.catalog.Column;
 +import org.apache.tajo.plan.PlanningException;
 +import org.apache.tajo.plan.visitor.SimpleAlgebraVisitor;
  
  import java.util.ArrayList;
+ import java.util.Collection;
  import java.util.List;
  import java.util.Map;
  import java.util.Stack;


[02/10] tajo git commit: TAJO-1586: TajoMaster HA startup failure on Yarn. (jaehwa)

Posted by ji...@apache.org.
TAJO-1586: TajoMaster HA startup failure on Yarn. (jaehwa)

Closes #566


Project: http://git-wip-us.apache.org/repos/asf/tajo/repo
Commit: http://git-wip-us.apache.org/repos/asf/tajo/commit/31c4630d
Tree: http://git-wip-us.apache.org/repos/asf/tajo/tree/31c4630d
Diff: http://git-wip-us.apache.org/repos/asf/tajo/diff/31c4630d

Branch: refs/heads/index_support
Commit: 31c4630d5d3ce0dee1df35491c557eefad15deeb
Parents: 6977471
Author: JaeHwa Jung <bl...@apache.org>
Authored: Thu May 14 14:17:54 2015 +0900
Committer: JaeHwa Jung <bl...@apache.org>
Committed: Thu May 14 14:19:55 2015 +0900

----------------------------------------------------------------------
 CHANGES                                         |   2 +
 .../org/apache/tajo/cli/tools/TajoAdmin.java    |   6 +-
 .../org/apache/tajo/cli/tools/TajoHAAdmin.java  |   9 +-
 .../apache/tajo/client/DummyServiceTracker.java |  19 +-
 .../java/org/apache/tajo/conf/TajoConf.java     |   2 +
 .../java/org/apache/tajo/ha/HAConstants.java    |   1 +
 .../java/org/apache/tajo/ha/HAServiceUtil.java  | 253 ---------------
 .../apache/tajo/service/BaseServiceTracker.java |  31 +-
 .../apache/tajo/service/HAServiceTracker.java   |  20 +-
 .../org/apache/tajo/service/ServiceTracker.java |  28 +-
 .../org/apache/tajo/ha/HdfsServiceTracker.java  | 322 +++++++++++--------
 .../java/org/apache/tajo/master/TajoMaster.java |  21 +-
 .../main/java/org/apache/tajo/util/JSPUtil.java |   2 +-
 .../java/org/apache/tajo/worker/TajoWorker.java |   2 +
 .../resources/webapps/admin/catalogview.jsp     |  11 +-
 .../main/resources/webapps/admin/cluster.jsp    |  10 +-
 .../src/main/resources/webapps/admin/index.jsp  |  10 +-
 .../src/main/resources/webapps/admin/query.jsp  |   7 +-
 .../resources/webapps/admin/query_executor.jsp  |   9 +-
 .../apache/tajo/ha/TestHAServiceHDFSImpl.java   |  28 +-
 20 files changed, 346 insertions(+), 447 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/tajo/blob/31c4630d/CHANGES
----------------------------------------------------------------------
diff --git a/CHANGES b/CHANGES
index 4912114..c9b2522 100644
--- a/CHANGES
+++ b/CHANGES
@@ -123,6 +123,8 @@ Release 0.11.0 - unreleased
 
   BUG FIXES
 
+    TAJO-1586: TajoMaster HA startup failure on Yarn. (jaehwa)
+
     TAJO-1598: TableMeta should change equals mechanism.
     (Contributed by DaeMyung Kang, Committed by jihoon)
 

http://git-wip-us.apache.org/repos/asf/tajo/blob/31c4630d/tajo-cli/src/main/java/org/apache/tajo/cli/tools/TajoAdmin.java
----------------------------------------------------------------------
diff --git a/tajo-cli/src/main/java/org/apache/tajo/cli/tools/TajoAdmin.java b/tajo-cli/src/main/java/org/apache/tajo/cli/tools/TajoAdmin.java
index 4f56649..739cd54 100644
--- a/tajo-cli/src/main/java/org/apache/tajo/cli/tools/TajoAdmin.java
+++ b/tajo-cli/src/main/java/org/apache/tajo/cli/tools/TajoAdmin.java
@@ -28,9 +28,9 @@ import org.apache.tajo.client.TajoClient;
 import org.apache.tajo.client.TajoClientImpl;
 import org.apache.tajo.client.TajoClientUtil;
 import org.apache.tajo.conf.TajoConf;
-import org.apache.tajo.ha.HAServiceUtil;
 import org.apache.tajo.ipc.ClientProtos.BriefQueryInfo;
 import org.apache.tajo.ipc.ClientProtos.WorkerResourceInfo;
+import org.apache.tajo.service.ServiceTracker;
 import org.apache.tajo.service.ServiceTrackerFactory;
 import org.apache.tajo.util.NetUtils;
 import org.apache.tajo.util.TajoIdUtils;
@@ -74,6 +74,7 @@ public class TajoAdmin {
   private TajoConf tajoConf;
   private TajoClient tajoClient;
   private Writer writer;
+  private ServiceTracker serviceTracker;
 
   public TajoAdmin(TajoConf tajoConf, Writer writer) {
     this(tajoConf, writer, null);
@@ -83,6 +84,7 @@ public class TajoAdmin {
     this.tajoConf = tajoConf;
     this.writer = writer;
     this.tajoClient = tajoClient;
+    serviceTracker = ServiceTrackerFactory.get(this.tajoConf);
   }
 
   private void printUsage() {
@@ -419,7 +421,7 @@ public class TajoAdmin {
 
     if (tajoConf.getBoolVar(TajoConf.ConfVars.TAJO_MASTER_HA_ENABLE)) {
 
-      List<String> list = HAServiceUtil.getMasters(tajoConf);
+      List<String> list = serviceTracker.getMasters(tajoConf);
       int i = 0;
       for (String master : list) {
         if (i > 0) {

http://git-wip-us.apache.org/repos/asf/tajo/blob/31c4630d/tajo-cli/src/main/java/org/apache/tajo/cli/tools/TajoHAAdmin.java
----------------------------------------------------------------------
diff --git a/tajo-cli/src/main/java/org/apache/tajo/cli/tools/TajoHAAdmin.java b/tajo-cli/src/main/java/org/apache/tajo/cli/tools/TajoHAAdmin.java
index e25d7d4..834b6b1 100644
--- a/tajo-cli/src/main/java/org/apache/tajo/cli/tools/TajoHAAdmin.java
+++ b/tajo-cli/src/main/java/org/apache/tajo/cli/tools/TajoHAAdmin.java
@@ -21,9 +21,8 @@ package org.apache.tajo.cli.tools;
 import com.google.protobuf.ServiceException;
 import org.apache.commons.cli.*;
 import org.apache.tajo.client.TajoClient;
-import org.apache.tajo.client.TajoClientImpl;
 import org.apache.tajo.conf.TajoConf;
-import org.apache.tajo.ha.HAServiceUtil;
+import org.apache.tajo.service.ServiceTracker;
 import org.apache.tajo.service.ServiceTrackerFactory;
 
 import java.io.IOException;
@@ -45,6 +44,7 @@ public class TajoHAAdmin {
 
   private TajoConf tajoConf;
   private Writer writer;
+  private ServiceTracker serviceTracker;
 
   public TajoHAAdmin(TajoConf tajoConf, Writer writer) {
     this(tajoConf, writer, null);
@@ -53,6 +53,7 @@ public class TajoHAAdmin {
   public TajoHAAdmin(TajoConf tajoConf, Writer writer, TajoClient tajoClient) {
     this.tajoConf = tajoConf;
     this.writer = writer;
+    serviceTracker = ServiceTrackerFactory.get(this.tajoConf);
   }
 
   private void printUsage() {
@@ -155,7 +156,7 @@ public class TajoHAAdmin {
   private void getState(Writer writer, String param) throws ParseException, IOException,
       ServiceException {
 
-    int retValue = HAServiceUtil.getState(param, tajoConf);
+    int retValue = serviceTracker.getState(param, tajoConf);
 
     switch (retValue) {
       case 1:
@@ -175,7 +176,7 @@ public class TajoHAAdmin {
 
   private void formatHA(Writer writer) throws ParseException, IOException,
       ServiceException {
-    int retValue = HAServiceUtil.formatHA(tajoConf);
+    int retValue = serviceTracker.formatHA(tajoConf);
 
     switch (retValue) {
       case 1:

http://git-wip-us.apache.org/repos/asf/tajo/blob/31c4630d/tajo-client/src/main/java/org/apache/tajo/client/DummyServiceTracker.java
----------------------------------------------------------------------
diff --git a/tajo-client/src/main/java/org/apache/tajo/client/DummyServiceTracker.java b/tajo-client/src/main/java/org/apache/tajo/client/DummyServiceTracker.java
index 762c2e7..cf826ea 100644
--- a/tajo-client/src/main/java/org/apache/tajo/client/DummyServiceTracker.java
+++ b/tajo-client/src/main/java/org/apache/tajo/client/DummyServiceTracker.java
@@ -18,6 +18,7 @@
 
 package org.apache.tajo.client;
 
+import org.apache.tajo.conf.TajoConf;
 import org.apache.tajo.exception.UnsupportedException;
 import org.apache.tajo.service.ServiceTracker;
 import org.apache.tajo.service.ServiceTrackerException;
@@ -25,6 +26,7 @@ import org.apache.tajo.service.TajoMasterInfo;
 
 import java.io.IOException;
 import java.net.InetSocketAddress;
+import java.util.ArrayList;
 import java.util.List;
 
 public class DummyServiceTracker implements ServiceTracker {
@@ -65,6 +67,21 @@ public class DummyServiceTracker implements ServiceTracker {
   }
 
   @Override
+  public int getState(String masterName, TajoConf conf) throws ServiceTrackerException {
+    return 0;
+  }
+
+  @Override
+  public int formatHA(TajoConf conf) throws ServiceTrackerException {
+    return 0;
+  }
+
+  @Override
+  public List<String> getMasters(TajoConf conf) throws ServiceTrackerException {
+    return new ArrayList<String>();
+  }
+
+  @Override
   public void register() throws IOException {
   }
 
@@ -73,7 +90,7 @@ public class DummyServiceTracker implements ServiceTracker {
   }
 
   @Override
-  public boolean isActiveStatus() {
+  public boolean isActiveMaster() {
     return true;
   }
 

http://git-wip-us.apache.org/repos/asf/tajo/blob/31c4630d/tajo-common/src/main/java/org/apache/tajo/conf/TajoConf.java
----------------------------------------------------------------------
diff --git a/tajo-common/src/main/java/org/apache/tajo/conf/TajoConf.java b/tajo-common/src/main/java/org/apache/tajo/conf/TajoConf.java
index 46e7618..59b1f43 100644
--- a/tajo-common/src/main/java/org/apache/tajo/conf/TajoConf.java
+++ b/tajo-common/src/main/java/org/apache/tajo/conf/TajoConf.java
@@ -142,6 +142,8 @@ public class TajoConf extends Configuration {
     // High availability configurations
     TAJO_MASTER_HA_ENABLE("tajo.master.ha.enable", false, Validators.bool()),
     TAJO_MASTER_HA_MONITOR_INTERVAL("tajo.master.ha.monitor.interval", 5 * 1000), // 5 sec
+    TAJO_MASTER_HA_CLIENT_RETRY_MAX_NUM("tajo.master.ha.client.read.retry.max-num", 120), // 120 retry
+    TAJO_MASTER_HA_CLIENT_RETRY_PAUSE_TIME("tajo.master.ha.client.read.pause-time", 500), // 500 ms
 
     // Service discovery
     DEFAULT_SERVICE_TRACKER_CLASS("tajo.discovery.service-tracker.class", BaseServiceTracker.class.getCanonicalName()),

http://git-wip-us.apache.org/repos/asf/tajo/blob/31c4630d/tajo-common/src/main/java/org/apache/tajo/ha/HAConstants.java
----------------------------------------------------------------------
diff --git a/tajo-common/src/main/java/org/apache/tajo/ha/HAConstants.java b/tajo-common/src/main/java/org/apache/tajo/ha/HAConstants.java
index c5f4b8a..7af19c6 100644
--- a/tajo-common/src/main/java/org/apache/tajo/ha/HAConstants.java
+++ b/tajo-common/src/main/java/org/apache/tajo/ha/HAConstants.java
@@ -24,4 +24,5 @@ public class HAConstants {
   public final static int RESOURCE_TRACKER_RPC_ADDRESS = 3;
   public final static int CATALOG_ADDRESS = 4;
   public final static int MASTER_INFO_ADDRESS = 5;
+  public final static String ACTIVE_LOCK_FILE = "active.lock";
 }

http://git-wip-us.apache.org/repos/asf/tajo/blob/31c4630d/tajo-common/src/main/java/org/apache/tajo/ha/HAServiceUtil.java
----------------------------------------------------------------------
diff --git a/tajo-common/src/main/java/org/apache/tajo/ha/HAServiceUtil.java b/tajo-common/src/main/java/org/apache/tajo/ha/HAServiceUtil.java
deleted file mode 100644
index 7001228..0000000
--- a/tajo-common/src/main/java/org/apache/tajo/ha/HAServiceUtil.java
+++ /dev/null
@@ -1,253 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.tajo.ha;
-
-import org.apache.hadoop.fs.*;
-import org.apache.tajo.TajoConstants;
-import org.apache.tajo.conf.TajoConf;
-import org.apache.tajo.util.NetUtils;
-
-
-import javax.net.SocketFactory;
-import java.io.IOException;
-import java.net.InetSocketAddress;
-import java.net.Socket;
-import java.util.ArrayList;
-import java.util.List;
-
-public class HAServiceUtil {
-
-  public static InetSocketAddress getMasterClientAddress(TajoConf conf) {
-    return getMasterAddress(conf, HAConstants.MASTER_CLIENT_RPC_ADDRESS);
-  }
-
-  public static String getMasterClientName(TajoConf conf) {
-    return NetUtils.normalizeInetSocketAddress(getMasterClientAddress(conf));
-  }
-
-  public static InetSocketAddress getMasterAddress(TajoConf conf, int type) {
-    InetSocketAddress masterAddress = null;
-
-    if (conf.getBoolVar(TajoConf.ConfVars.TAJO_MASTER_HA_ENABLE)) {
-      try {
-        FileSystem fs = getFileSystem(conf);
-        Path activePath = new Path(TajoConf.getSystemHADir(conf), TajoConstants.SYSTEM_HA_ACTIVE_DIR_NAME);
-
-        if (fs.exists(activePath)) {
-          FileStatus[] files = fs.listStatus(activePath);
-
-          if (files.length == 1) {
-            Path file = files[0].getPath();
-            String hostAddress = file.getName().replaceAll("_", ":");
-            FSDataInputStream stream = fs.open(file);
-            String data = stream.readUTF();
-            stream.close();
-
-            String[] addresses = data.split("_");
-
-            switch (type) {
-              case HAConstants.MASTER_UMBILICAL_RPC_ADDRESS:
-                masterAddress = NetUtils.createSocketAddr(hostAddress);
-                break;
-              case HAConstants.MASTER_CLIENT_RPC_ADDRESS:
-                masterAddress = NetUtils.createSocketAddr(addresses[0]);
-                break;
-              case HAConstants.RESOURCE_TRACKER_RPC_ADDRESS:
-                masterAddress = NetUtils.createSocketAddr(addresses[1]);
-                break;
-              case HAConstants.CATALOG_ADDRESS:
-                masterAddress = NetUtils.createSocketAddr(addresses[2]);
-                break;
-              case HAConstants.MASTER_INFO_ADDRESS:
-                masterAddress = NetUtils.createSocketAddr(addresses[3]);
-                break;
-              default:
-                break;
-            }
-          }
-        }
-
-      } catch (Exception e) {
-        e.printStackTrace();
-      }
-    }
-
-    if (masterAddress == null) {
-      switch (type) {
-        case HAConstants.MASTER_UMBILICAL_RPC_ADDRESS:
-          masterAddress = NetUtils.createSocketAddr(conf.getVar(TajoConf.ConfVars
-              .TAJO_MASTER_UMBILICAL_RPC_ADDRESS));
-          break;
-        case HAConstants.MASTER_CLIENT_RPC_ADDRESS:
-          masterAddress = NetUtils.createSocketAddr(conf.getVar(TajoConf.ConfVars
-              .TAJO_MASTER_CLIENT_RPC_ADDRESS));
-          break;
-        case HAConstants.RESOURCE_TRACKER_RPC_ADDRESS:
-          masterAddress = NetUtils.createSocketAddr(conf.getVar(TajoConf.ConfVars
-              .RESOURCE_TRACKER_RPC_ADDRESS));
-          break;
-        case HAConstants.CATALOG_ADDRESS:
-          masterAddress = NetUtils.createSocketAddr(conf.getVar(TajoConf.ConfVars
-              .CATALOG_ADDRESS));
-          break;
-        case HAConstants.MASTER_INFO_ADDRESS:
-          masterAddress = NetUtils.createSocketAddr(conf.getVar(TajoConf.ConfVars
-              .TAJO_MASTER_INFO_ADDRESS));
-          break;
-        default:
-          break;
-      }
-    }
-
-    return masterAddress;
-  }
-
-  public static boolean isMasterAlive(String masterName, TajoConf conf) {
-    boolean isAlive = true;
-
-    try {
-      // how to create sockets
-      SocketFactory socketFactory = org.apache.hadoop.net.NetUtils.getDefaultSocketFactory(conf);
-
-      int connectionTimeout = conf.getInt(CommonConfigurationKeys.IPC_CLIENT_CONNECT_TIMEOUT_KEY,
-          CommonConfigurationKeys.IPC_CLIENT_CONNECT_TIMEOUT_DEFAULT);
-
-      InetSocketAddress server = org.apache.hadoop.net.NetUtils.createSocketAddr(masterName);
-
-      // connected socket
-      Socket socket = socketFactory.createSocket();
-      org.apache.hadoop.net.NetUtils.connect(socket, server, connectionTimeout);
-    } catch (Exception e) {
-      isAlive = false;
-    }
-    return isAlive;
-  }
-
-  public static int getState(String masterName, TajoConf conf) {
-    String targetMaster = masterName.replaceAll(":", "_");
-    int retValue = -1;
-
-    try {
-      FileSystem fs = getFileSystem(conf);
-      Path activePath = new Path(TajoConf.getSystemHADir(conf), TajoConstants.SYSTEM_HA_ACTIVE_DIR_NAME);
-      Path backupPath = new Path(TajoConf.getSystemHADir(conf), TajoConstants.SYSTEM_HA_BACKUP_DIR_NAME);
-
-      Path temPath = null;
-
-      // Check backup masters
-      FileStatus[] files = fs.listStatus(backupPath);
-      for (FileStatus status : files) {
-        temPath = status.getPath();
-        if (temPath.getName().equals(targetMaster)) {
-          return 0;
-        }
-      }
-
-      // Check active master
-      files = fs.listStatus(activePath);
-      if (files.length == 1) {
-        temPath = files[0].getPath();
-        if (temPath.getName().equals(targetMaster)) {
-          return 1;
-        }
-      }
-      retValue = -2;
-    } catch (Exception e) {
-      e.printStackTrace();
-    }
-    return retValue;
-  }
-
-  public static int formatHA(TajoConf conf) {
-    int retValue = -1;
-    try {
-      FileSystem fs = getFileSystem(conf);
-      Path activePath = new Path(TajoConf.getSystemHADir(conf), TajoConstants.SYSTEM_HA_ACTIVE_DIR_NAME);
-      Path backupPath = new Path(TajoConf.getSystemHADir(conf), TajoConstants.SYSTEM_HA_BACKUP_DIR_NAME);
-      Path temPath = null;
-
-      int aliveMasterCount = 0;
-      // Check backup masters
-      FileStatus[] files = fs.listStatus(backupPath);
-      for (FileStatus status : files) {
-        temPath = status.getPath();
-        if (isMasterAlive(temPath.getName().replaceAll("_", ":"), conf)) {
-          aliveMasterCount++;
-        }
-      }
-
-      // Check active master
-      files = fs.listStatus(activePath);
-      if (files.length == 1) {
-        temPath = files[0].getPath();
-        if (isMasterAlive(temPath.getName().replaceAll("_", ":"), conf)) {
-          aliveMasterCount++;
-        }
-      }
-
-      // If there is any alive master, users can't format storage.
-      if (aliveMasterCount > 0) {
-        return 0;
-      }
-
-      // delete ha path.
-      fs.delete(TajoConf.getSystemHADir(conf), true);
-      retValue = 1;
-    } catch (Exception e) {
-      e.printStackTrace();
-    }
-    return retValue;
-  }
-
-
-  public static List<String> getMasters(TajoConf conf) {
-    List<String> list = new ArrayList<String>();
-
-    try {
-      FileSystem fs = getFileSystem(conf);
-      Path activePath = new Path(TajoConf.getSystemHADir(conf), TajoConstants.SYSTEM_HA_ACTIVE_DIR_NAME);
-      Path backupPath = new Path(TajoConf.getSystemHADir(conf), TajoConstants.SYSTEM_HA_BACKUP_DIR_NAME);
-      Path temPath = null;
-
-      // Check backup masters
-      FileStatus[] files = fs.listStatus(backupPath);
-      for (FileStatus status : files) {
-        temPath = status.getPath();
-        list.add(temPath.getName().replaceAll("_", ":"));
-      }
-
-      // Check active master
-      files = fs.listStatus(activePath);
-      if (files.length == 1) {
-        temPath = files[0].getPath();
-        list.add(temPath.getName().replaceAll("_", ":"));
-      }
-
-    } catch (Exception e) {
-      e.printStackTrace();
-    }
-    return list;
-  }
-
-  private static FileSystem getFileSystem(TajoConf conf) throws IOException {
-    Path rootPath = TajoConf.getTajoRootDir(conf);
-    return rootPath.getFileSystem(conf);
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/tajo/blob/31c4630d/tajo-common/src/main/java/org/apache/tajo/service/BaseServiceTracker.java
----------------------------------------------------------------------
diff --git a/tajo-common/src/main/java/org/apache/tajo/service/BaseServiceTracker.java b/tajo-common/src/main/java/org/apache/tajo/service/BaseServiceTracker.java
index bf7fd2c..e598f2a 100644
--- a/tajo-common/src/main/java/org/apache/tajo/service/BaseServiceTracker.java
+++ b/tajo-common/src/main/java/org/apache/tajo/service/BaseServiceTracker.java
@@ -77,6 +77,29 @@ public class BaseServiceTracker implements ServiceTracker {
   }
 
   @Override
+  public int getState(String masterName, TajoConf conf) throws ServiceTrackerException {
+    String masterAddress = getMasterAddress();
+
+    if (masterAddress.equals(masterName)) {
+      return 1;
+    } else {
+      return 0;
+    }
+  }
+
+  @Override
+  public int formatHA(TajoConf conf) throws ServiceTrackerException {
+    throw new ServiceTrackerException("Cannot format HA directories on non-HA mode");
+  }
+
+  @Override
+  public List<String> getMasters(TajoConf conf) throws ServiceTrackerException {
+    List<String> list = TUtil.newList();
+    list.add(getMasterAddress());
+    return list;
+  }
+
+  @Override
   public void register() throws IOException {
   }
 
@@ -85,7 +108,7 @@ public class BaseServiceTracker implements ServiceTracker {
   }
 
   @Override
-  public boolean isActiveStatus() {
+  public boolean isActiveMaster() {
     return true;
   }
 
@@ -94,4 +117,10 @@ public class BaseServiceTracker implements ServiceTracker {
     return tajoMasterInfos;
   }
 
+  private String getMasterAddress() {
+    String masterAddress = tajoMasterInfo.getTajoMasterAddress().getAddress().getHostAddress() + ":" + tajoMasterInfo
+      .getTajoMasterAddress().getPort();
+
+    return masterAddress;
+  }
 }

http://git-wip-us.apache.org/repos/asf/tajo/blob/31c4630d/tajo-common/src/main/java/org/apache/tajo/service/HAServiceTracker.java
----------------------------------------------------------------------
diff --git a/tajo-common/src/main/java/org/apache/tajo/service/HAServiceTracker.java b/tajo-common/src/main/java/org/apache/tajo/service/HAServiceTracker.java
index c808537..081b153 100644
--- a/tajo-common/src/main/java/org/apache/tajo/service/HAServiceTracker.java
+++ b/tajo-common/src/main/java/org/apache/tajo/service/HAServiceTracker.java
@@ -18,13 +18,18 @@
 
 package org.apache.tajo.service;
 
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.net.NetUtils;
+import org.apache.tajo.conf.TajoConf;
+import org.apache.tajo.util.FileUtil;
 
 import javax.net.SocketFactory;
 import java.net.InetSocketAddress;
 import java.net.Socket;
 
 public abstract class HAServiceTracker implements ServiceTracker {
+  private static final Log LOG = LogFactory.getLog(HAServiceTracker.class);
 
   static SocketFactory socketFactory = SocketFactory.getDefault();
 
@@ -32,16 +37,29 @@ public abstract class HAServiceTracker implements ServiceTracker {
     return true;
   }
 
+  public static boolean checkConnection(String address) {
+    return checkConnection(address, ":");
+  }
+
+  public static boolean checkConnection(String address, String delimiter) {
+    String[] hostAddress = address.split(delimiter);
+    InetSocketAddress socketAddress = new InetSocketAddress(hostAddress[0], Integer.parseInt(hostAddress[1]));
+    return checkConnection(socketAddress);
+  }
+
   public static boolean checkConnection(InetSocketAddress address) {
     boolean isAlive = true;
+    Socket socket = null;
 
     try {
       int connectionTimeout = 10;
 
-      Socket socket = socketFactory.createSocket();
+      socket = socketFactory.createSocket();
       NetUtils.connect(socket, address, connectionTimeout);
     } catch (Exception e) {
       isAlive = false;
+    } finally {
+      FileUtil.cleanup(LOG, socket);
     }
     return isAlive;
   }

http://git-wip-us.apache.org/repos/asf/tajo/blob/31c4630d/tajo-common/src/main/java/org/apache/tajo/service/ServiceTracker.java
----------------------------------------------------------------------
diff --git a/tajo-common/src/main/java/org/apache/tajo/service/ServiceTracker.java b/tajo-common/src/main/java/org/apache/tajo/service/ServiceTracker.java
index 73ff112..5888ff3 100644
--- a/tajo-common/src/main/java/org/apache/tajo/service/ServiceTracker.java
+++ b/tajo-common/src/main/java/org/apache/tajo/service/ServiceTracker.java
@@ -18,46 +18,54 @@
 
 package org.apache.tajo.service;
 
+import org.apache.tajo.conf.TajoConf;
+
 import java.io.IOException;
 import java.net.InetSocketAddress;
 import java.util.List;
 
 public interface ServiceTracker {
 
-  public abstract boolean isHighAvailable();
+  boolean isHighAvailable();
+
+  InetSocketAddress getUmbilicalAddress() throws ServiceTrackerException;
+
+  InetSocketAddress getClientServiceAddress() throws ServiceTrackerException;
+
+  InetSocketAddress getResourceTrackerAddress() throws ServiceTrackerException;
 
-  public abstract InetSocketAddress getUmbilicalAddress() throws ServiceTrackerException;
+  InetSocketAddress getCatalogAddress() throws ServiceTrackerException;
 
-  public abstract InetSocketAddress getClientServiceAddress() throws ServiceTrackerException;
+  InetSocketAddress getMasterHttpInfo() throws ServiceTrackerException;
 
-  public abstract InetSocketAddress getResourceTrackerAddress() throws ServiceTrackerException;
+  int getState(String masterName, TajoConf conf) throws ServiceTrackerException;
 
-  public abstract InetSocketAddress getCatalogAddress() throws ServiceTrackerException;
+  int formatHA(TajoConf conf) throws ServiceTrackerException;
 
-  public abstract InetSocketAddress getMasterHttpInfo() throws ServiceTrackerException;
+  List<String> getMasters(TajoConf conf) throws ServiceTrackerException;
 
   /**
    * Add master name to shared storage.
    */
-  public void register() throws IOException;
+  void register() throws IOException;
 
 
   /**
    * Delete master name to shared storage.
    *
    */
-  public void delete() throws IOException;
+  void delete() throws IOException;
 
   /**
    *
    * @return True if current master is an active master.
    */
-  public boolean isActiveStatus();
+  boolean isActiveMaster();
 
   /**
    *
    * @return return all master list
    * @throws IOException
    */
-  public List<TajoMasterInfo> getMasters() throws IOException;
+  List<TajoMasterInfo> getMasters() throws IOException;
 }

http://git-wip-us.apache.org/repos/asf/tajo/blob/31c4630d/tajo-core/src/main/java/org/apache/tajo/ha/HdfsServiceTracker.java
----------------------------------------------------------------------
diff --git a/tajo-core/src/main/java/org/apache/tajo/ha/HdfsServiceTracker.java b/tajo-core/src/main/java/org/apache/tajo/ha/HdfsServiceTracker.java
index 4a782ec..5f1aff8 100644
--- a/tajo-core/src/main/java/org/apache/tajo/ha/HdfsServiceTracker.java
+++ b/tajo-core/src/main/java/org/apache/tajo/ha/HdfsServiceTracker.java
@@ -24,6 +24,7 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.*;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.util.ShutdownHookManager;
 import org.apache.tajo.TajoConstants;
 import org.apache.tajo.conf.TajoConf;
 import org.apache.tajo.conf.TajoConf.ConfVars;
@@ -31,7 +32,8 @@ import org.apache.tajo.master.TajoMaster;
 import org.apache.tajo.service.HAServiceTracker;
 import org.apache.tajo.service.ServiceTrackerException;
 import org.apache.tajo.service.TajoMasterInfo;
-import org.apache.tajo.util.TUtil;
+import org.apache.tajo.util.*;
+import org.apache.tajo.util.FileUtil;
 
 import javax.net.SocketFactory;
 import java.io.IOException;
@@ -58,7 +60,7 @@ public class HdfsServiceTracker extends HAServiceTracker {
   private Path activePath;
   private Path backupPath;
 
-  private boolean isActiveStatus = false;
+  private boolean isActiveMaster = false;
 
   //thread which runs periodically to see the last time since a heartbeat is received.
   private Thread checkerThread;
@@ -74,8 +76,7 @@ public class HdfsServiceTracker extends HAServiceTracker {
 
     InetSocketAddress socketAddress = conf.getSocketAddrVar(ConfVars.TAJO_MASTER_UMBILICAL_RPC_ADDRESS);
     this.masterName = socketAddress.getAddress().getHostAddress() + ":" + socketAddress.getPort();
-
-    monitorInterval = conf.getIntVar(ConfVars.TAJO_MASTER_HA_MONITOR_INTERVAL);
+    this.monitorInterval = conf.getIntVar(ConfVars.TAJO_MASTER_HA_MONITOR_INTERVAL);
   }
 
   private void initSystemDirectory() throws IOException {
@@ -113,87 +114,144 @@ public class HdfsServiceTracker extends HAServiceTracker {
     }
   }
 
+  /**
+   * It will creates the following form string. It includes
+   *
+   * <pre>
+   * {CLIENT_RPC_HOST:PORT}_{RESOURCE_TRACKER_HOST:PORT}_{CATALOG_HOST:PORT}_{MASTER_WEB_HOST:PORT}
+   * </pre>
+   *
+   * @throws IOException
+   */
   @Override
   public void register() throws IOException {
-    FileStatus[] files = fs.listStatus(activePath);
+    // Check lock file
+    boolean lockResult = createLockFile();
+
+    String fileName = masterName.replaceAll(":", "_");
+    Path activeFile = new Path(activePath, fileName);
+    Path backupFile = new Path(backupPath, fileName);
+
+    // Set TajoMasterInfo object which has several rpc server addresses.
+    StringBuilder sb = new StringBuilder();
+    InetSocketAddress address = getHostAddress(HAConstants.MASTER_UMBILICAL_RPC_ADDRESS);
+    sb.append(address.getAddress().getHostAddress()).append(":").append(address.getPort()).append("_");
+
+    address = getHostAddress(HAConstants.MASTER_CLIENT_RPC_ADDRESS);
+    sb.append(address.getAddress().getHostAddress()).append(":").append(address.getPort()).append("_");
+
+    address = getHostAddress(HAConstants.RESOURCE_TRACKER_RPC_ADDRESS);
+    sb.append(address.getAddress().getHostAddress()).append(":").append(address.getPort()).append("_");
+
+    address = getHostAddress(HAConstants.CATALOG_ADDRESS);
+    sb.append(address.getAddress().getHostAddress()).append(":").append(address.getPort()).append("_");
+
+    address = getHostAddress(HAConstants.MASTER_INFO_ADDRESS);
+    sb.append(address.getAddress().getHostAddress()).append(":").append(address.getPort());
 
     // Phase 1: If there is not another active master, this try to become active master.
-    if (files.length == 0) {
-      createMasterFile(true);
+    if (lockResult) {
+      fs.delete(backupFile, false);
+      createMasterFile(activeFile, sb);
       currentActiveMaster = masterName;
+      writeSystemConf();
       LOG.info(String.format("This is added to active master (%s)", masterName));
     } else {
       // Phase 2: If there is active master information, we need to check its status.
-      Path activePath = files[0].getPath();
-      currentActiveMaster = activePath.getName().replaceAll("_", ":");
+      FileStatus[] files = fs.listStatus(activePath);
+      Path existingActiveFile = null;
+      if (files.length > 2) {
+        throw new ServiceTrackerException("Three or more than active master entries.");
+      }
+      for(FileStatus eachFile : files) {
+        if (!eachFile.getPath().getName().equals(HAConstants.ACTIVE_LOCK_FILE)) {
+          existingActiveFile = eachFile.getPath();
+        }
+      }
+      currentActiveMaster = existingActiveFile.getName().replaceAll("_", ":");
 
       // Phase 3: If current active master is dead, this master should be active master.
-      if (!HAServiceUtil.isMasterAlive(currentActiveMaster, conf)) {
-        fs.delete(activePath, true);
-        createMasterFile(true);
+      if (!checkConnection(currentActiveMaster)) {
+        fs.delete(existingActiveFile, false);
+        fs.delete(backupFile, false);
+        createMasterFile(activeFile, sb);
         currentActiveMaster = masterName;
         LOG.info(String.format("This is added to active master (%s)", masterName));
       } else {
         // Phase 4: If current active master is alive, this master need to be backup master.
-        createMasterFile(false);
-        LOG.info(String.format("This is added to backup masters (%s)", masterName));
+        if (masterName.equals(currentActiveMaster)) {
+          LOG.info(String.format("This has already been added to active master (%s)", masterName));
+        } else {
+          if (fs.exists(backupFile)) {
+            LOG.info(String.format("This has already been added to backup masters (%s)", masterName));
+          } else {
+            createMasterFile(backupFile, sb);
+            LOG.info(String.format("This is added to backup master (%s)", masterName));
+          }
+        }
       }
     }
+    startPingChecker();
   }
 
   /**
-   * It will creates the following form string. It includes
+   * Storing the system configs
    *
-   * <pre>
-   * {CLIENT_RPC_HOST:PORT}_{RESOURCE_TRACKER_HOST:PORT}_{CATALOG_HOST:PORT}_{MASTER_WEB_HOST:PORT}
-   * </pre>
-   *
-   * @param isActive A boolean flag to indicate if it is for master or not.
    * @throws IOException
    */
-  private void createMasterFile(boolean isActive) throws IOException {
-    String fileName = masterName.replaceAll(":", "_");
-    Path path = null;
+  private void writeSystemConf() throws IOException {
+    Path systemConfPath = TajoConf.getSystemConfPath(conf);
 
-    if (isActive) {
-      path = new Path(activePath, fileName);
-    } else {
-      path = new Path(backupPath, fileName);
+    FSDataOutputStream out = FileSystem.create(fs, systemConfPath,
+      new FsPermission(TajoMaster.SYSTEM_CONF_FILE_PERMISSION));
+    try {
+      conf.writeXml(out);
+    } finally {
+      out.close();
     }
+    fs.setReplication(systemConfPath, (short) conf.getIntVar(ConfVars.SYSTEM_CONF_REPLICA_COUNT));
+  }
 
-    StringBuilder sb = new StringBuilder();
-    InetSocketAddress address = getHostAddress(HAConstants.MASTER_CLIENT_RPC_ADDRESS);
-    sb.append(address.getAddress().getHostAddress()).append(":").append(address.getPort()).append("_");
-
-    address = getHostAddress(HAConstants.RESOURCE_TRACKER_RPC_ADDRESS);
-    sb.append(address.getAddress().getHostAddress()).append(":").append(address.getPort()).append("_");
-
-    address = getHostAddress(HAConstants.CATALOG_ADDRESS);
-    sb.append(address.getAddress().getHostAddress()).append(":").append(address.getPort()).append("_");
+  private boolean createLockFile() throws IOException {
+    boolean result = false;
+    FSDataOutputStream lockOutput = null;
 
-    address = getHostAddress(HAConstants.MASTER_INFO_ADDRESS);
-    sb.append(address.getAddress().getHostAddress()).append(":").append(address.getPort());
+    Path lockFile = new Path(activePath, HAConstants.ACTIVE_LOCK_FILE);
+    try {
+      lockOutput = fs.create(lockFile, false);
+      lockOutput.hsync();
+      lockOutput.close();
+      fs.deleteOnExit(lockFile);
+      result = true;
+    } catch (FileAlreadyExistsException e) {
+      LOG.info(String.format("Lock file already exists at (%s)", lockFile.toString()));
+      result = false;
+    } catch (Exception e) {
+      throw new IOException("Lock file creation is failed - " + e.getMessage());
+    } finally {
+      FileUtil.cleanup(LOG, lockOutput);
+    }
 
-    FSDataOutputStream out = fs.create(path);
+    return result;
+  }
 
+  private void createMasterFile(Path path, StringBuilder sb) throws IOException {
+    FSDataOutputStream out = null;
     try {
+      out = fs.create(path, false);
+
       out.writeUTF(sb.toString());
       out.hsync();
       out.close();
-    } catch (FileAlreadyExistsException e) {
-      createMasterFile(false);
-    }
 
-    if (isActive) {
-      isActiveStatus = true;
-    } else {
-      isActiveStatus = false;
+      fs.deleteOnExit(path);
+    } catch (Exception e) {
+      throw new IOException("File creation is failed - " + e.getMessage());
+    } finally {
+      FileUtil.cleanup(LOG, out);
     }
-
-    startPingChecker();
   }
 
-
   private InetSocketAddress getHostAddress(int type) {
     InetSocketAddress address = null;
 
@@ -226,65 +284,61 @@ public class HdfsServiceTracker extends HAServiceTracker {
 
   @Override
   public void delete() throws IOException {
+    if (ShutdownHookManager.get().isShutdownInProgress()) return;
+
     String fileName = masterName.replaceAll(":", "_");
 
-    Path activeFile = new Path(activePath, fileName);
-    if (fs.exists(activeFile)) {
-      fs.delete(activeFile, true);
-    }
+    fs.delete(new Path(activePath, fileName), false);
+    fs.delete(new Path(activePath, HAConstants.ACTIVE_LOCK_FILE), false);
+    fs.delete(new Path(backupPath, fileName), false);
 
-    Path backupFile = new Path(backupPath, fileName);
-    if (fs.exists(backupFile)) {
-      fs.delete(backupFile, true);
-    }
-    if (isActiveStatus) {
-      isActiveStatus = false;
-    }
     stopped = true;
   }
 
   @Override
-  public boolean isActiveStatus() {
-    return isActiveStatus;
+  public boolean isActiveMaster() {
+    if (currentActiveMaster.equals(masterName)) {
+      return true;
+    } else {
+      return false;
+    }
   }
 
   @Override
   public List<TajoMasterInfo> getMasters() throws IOException {
     List<TajoMasterInfo> list = TUtil.newList();
-    Path path = null;
 
     FileStatus[] files = fs.listStatus(activePath);
-    if (files.length == 1) {
-      path = files[0].getPath();
-      list.add(createTajoMasterInfo(path, true));
+    for(FileStatus status : files) {
+      if (!status.getPath().getName().equals(HAConstants.ACTIVE_LOCK_FILE)) {
+        list.add(getTajoMasterInfo(status.getPath(), true));
+      }
     }
 
     files = fs.listStatus(backupPath);
     for (FileStatus status : files) {
-      path = status.getPath();
-      list.add(createTajoMasterInfo(path, false));
+      list.add(getTajoMasterInfo(status.getPath(), false));
     }
 
     return list;
   }
 
-  private TajoMasterInfo createTajoMasterInfo(Path path, boolean isActive) throws IOException {
+  private TajoMasterInfo getTajoMasterInfo(Path path, boolean isActive) throws IOException {
     String masterAddress = path.getName().replaceAll("_", ":");
-    boolean isAlive = HAServiceUtil.isMasterAlive(masterAddress, conf);
+    boolean isAlive = checkConnection(masterAddress);
 
     FSDataInputStream stream = fs.open(path);
     String data = stream.readUTF();
-
     stream.close();
 
     String[] addresses = data.split("_");
     TajoMasterInfo info = new TajoMasterInfo();
 
-    info.setTajoMasterAddress(NetUtils.createSocketAddr(masterAddress));
-    info.setTajoClientAddress(NetUtils.createSocketAddr(addresses[0]));
-    info.setWorkerResourceTrackerAddr(NetUtils.createSocketAddr(addresses[1]));
-    info.setCatalogAddress(NetUtils.createSocketAddr(addresses[2]));
-    info.setWebServerAddress(NetUtils.createSocketAddr(addresses[3]));
+    info.setTajoMasterAddress(NetUtils.createSocketAddr(addresses[0]));
+    info.setTajoClientAddress(NetUtils.createSocketAddr(addresses[1]));
+    info.setWorkerResourceTrackerAddr(NetUtils.createSocketAddr(addresses[2]));
+    info.setCatalogAddress(NetUtils.createSocketAddr(addresses[3]));
+    info.setWebServerAddress(NetUtils.createSocketAddr(addresses[4]));
 
     info.setAvailable(isAlive);
     info.setActive(isActive);
@@ -299,21 +353,18 @@ public class HdfsServiceTracker extends HAServiceTracker {
         synchronized (HdfsServiceTracker.this) {
           try {
             if (!currentActiveMaster.equals(masterName)) {
-              boolean isAlive = HAServiceUtil.isMasterAlive(currentActiveMaster, conf);
               if (LOG.isDebugEnabled()) {
-                LOG.debug("currentActiveMaster:" + currentActiveMaster + ", thisMasterName:" + masterName
-                  + ", isAlive:" + isAlive);
+                LOG.debug("currentActiveMaster:" + currentActiveMaster + ", thisMasterName:" + masterName);
               }
 
               // If active master is dead, this master should be active master instead of
               // previous active master.
-              if (!isAlive) {
-                FileStatus[] files = fs.listStatus(activePath);
-                if (files.length == 0 || (files.length ==  1
-                  && currentActiveMaster.equals(files[0].getPath().getName().replaceAll("_", ":")))) {
-                  delete();
-                  register();
-                }
+              if (!checkConnection(currentActiveMaster)) {
+                Path activeFile = new Path(activePath, currentActiveMaster.replaceAll(":", "_"));
+                fs.delete(activeFile, false);
+                Path lockFile = new Path(activePath, HAConstants.ACTIVE_LOCK_FILE);
+                fs.delete(lockFile, false);
+                register();
               }
             }
           } catch (Exception e) {
@@ -345,7 +396,7 @@ public class HdfsServiceTracker extends HAServiceTracker {
   @Override
   public InetSocketAddress getUmbilicalAddress() {
     if (!checkConnection(umbilicalRpcAddr)) {
-      umbilicalRpcAddr = NetUtils.createSocketAddr(getAddressElements(conf).get(MASTER_UMBILICAL_RPC_ADDRESS));
+      umbilicalRpcAddr = NetUtils.createSocketAddr(getAddressElements().get(MASTER_UMBILICAL_RPC_ADDRESS));
     }
 
     return umbilicalRpcAddr;
@@ -354,7 +405,7 @@ public class HdfsServiceTracker extends HAServiceTracker {
   @Override
   public InetSocketAddress getClientServiceAddress() {
     if (!checkConnection(clientRpcAddr)) {
-      clientRpcAddr = NetUtils.createSocketAddr(getAddressElements(conf).get(MASTER_CLIENT_RPC_ADDRESS));
+      clientRpcAddr = NetUtils.createSocketAddr(getAddressElements().get(MASTER_CLIENT_RPC_ADDRESS));
     }
 
     return clientRpcAddr;
@@ -363,7 +414,7 @@ public class HdfsServiceTracker extends HAServiceTracker {
   @Override
   public InetSocketAddress getResourceTrackerAddress() {
     if (!checkConnection(resourceTrackerRpcAddr)) {
-      resourceTrackerRpcAddr = NetUtils.createSocketAddr(getAddressElements(conf).get(RESOURCE_TRACKER_RPC_ADDRESS));
+      resourceTrackerRpcAddr = NetUtils.createSocketAddr(getAddressElements().get(RESOURCE_TRACKER_RPC_ADDRESS));
     }
 
     return resourceTrackerRpcAddr;
@@ -372,7 +423,7 @@ public class HdfsServiceTracker extends HAServiceTracker {
   @Override
   public InetSocketAddress getCatalogAddress() {
     if (!checkConnection(catalogAddr)) {
-      catalogAddr = NetUtils.createSocketAddr(getAddressElements(conf).get(CATALOG_ADDRESS));
+      catalogAddr = NetUtils.createSocketAddr(getAddressElements().get(CATALOG_ADDRESS));
     }
 
     return catalogAddr;
@@ -381,7 +432,7 @@ public class HdfsServiceTracker extends HAServiceTracker {
   @Override
   public InetSocketAddress getMasterHttpInfo() throws ServiceTrackerException {
     if (!checkConnection(masterHttpInfoAddr)) {
-      masterHttpInfoAddr = NetUtils.createSocketAddr(getAddressElements(conf).get(MASTER_HTTP_INFO));
+      masterHttpInfoAddr = NetUtils.createSocketAddr(getAddressElements().get(MASTER_HTTP_INFO));
     }
 
     return masterHttpInfoAddr;
@@ -390,11 +441,10 @@ public class HdfsServiceTracker extends HAServiceTracker {
   /**
    * Reads a text file stored in HDFS file, and then return all service addresses read from a HDFS file.   *
    *
-   * @param conf
    * @return all service addresses
    * @throws ServiceTrackerException
    */
-  private static List<String> getAddressElements(TajoConf conf) throws ServiceTrackerException {
+  private synchronized List<String> getAddressElements() throws ServiceTrackerException {
 
     try {
       FileSystem fs = getFileSystem(conf);
@@ -408,15 +458,34 @@ public class HdfsServiceTracker extends HAServiceTracker {
       }
 
       FileStatus[] files = fs.listStatus(activeMasterBaseDir);
+      /* wait for active master from HDFS */
+      int pause = conf.getIntVar(ConfVars.TAJO_MASTER_HA_CLIENT_RETRY_PAUSE_TIME);
+      int maxRetry = conf.getIntVar(ConfVars.TAJO_MASTER_HA_CLIENT_RETRY_MAX_NUM);
+      int retry = 0;
+
+      while (files.length < 2 && retry < maxRetry) {
+        try {
+          this.wait(pause);
+        } catch (InterruptedException e) {
+          throw new ServiceTrackerException(e);
+        }
+        files = fs.listStatus(activeMasterBaseDir);
+      }
 
       if (files.length < 1) {
+        LOG.error("Exceeded the maximum retry (" + maxRetry + ") to read TajoMaster address from HDFS");
         throw new ServiceTrackerException("No active master entry");
-      } else if (files.length > 1) {
-        throw new ServiceTrackerException("Two or more than active master entries.");
+      } else if (files.length > 2) {
+        throw new ServiceTrackerException("Three or more than active master entries.");
       }
 
-      // We can ensure that there is only one file due to the above assertion.
-      Path activeMasterEntry = files[0].getPath();
+      Path activeMasterEntry = null;
+
+      for (FileStatus eachFile : files) {
+        if (!eachFile.getPath().getName().equals(HAConstants.ACTIVE_LOCK_FILE)) {
+          activeMasterEntry = eachFile.getPath();
+        }
+      }
 
       if (!fs.isFile(activeMasterEntry)) {
         throw new ServiceTrackerException("Active master entry must be a file, but it is a directory.");
@@ -424,12 +493,9 @@ public class HdfsServiceTracker extends HAServiceTracker {
 
       List<String> addressElements = TUtil.newList();
 
-      addressElements.add(activeMasterEntry.getName().replaceAll("_", ":")); // Add UMBILICAL_RPC_ADDRESS to elements
-
       FSDataInputStream stream = fs.open(activeMasterEntry);
       String data = stream.readUTF();
       stream.close();
-
       addressElements.addAll(TUtil.newList(data.split("_"))); // Add remains entries to elements
 
       // ensure the number of entries
@@ -442,33 +508,8 @@ public class HdfsServiceTracker extends HAServiceTracker {
     }
   }
 
-
-  public static boolean isMasterAlive(InetSocketAddress masterAddress, TajoConf conf) {
-    return isMasterAlive(org.apache.tajo.util.NetUtils.normalizeInetSocketAddress(masterAddress), conf);
-  }
-
-  public static boolean isMasterAlive(String masterName, TajoConf conf) {
-    boolean isAlive = true;
-
-    try {
-      // how to create sockets
-      SocketFactory socketFactory = org.apache.hadoop.net.NetUtils.getDefaultSocketFactory(conf);
-
-      int connectionTimeout = conf.getInt(CommonConfigurationKeys.IPC_CLIENT_CONNECT_TIMEOUT_KEY,
-          CommonConfigurationKeys.IPC_CLIENT_CONNECT_TIMEOUT_DEFAULT);
-
-      InetSocketAddress server = org.apache.hadoop.net.NetUtils.createSocketAddr(masterName);
-
-      // connected socket
-      Socket socket = socketFactory.createSocket();
-      org.apache.hadoop.net.NetUtils.connect(socket, server, connectionTimeout);
-    } catch (Exception e) {
-      isAlive = false;
-    }
-    return isAlive;
-  }
-
-  public static int getState(String masterName, TajoConf conf) {
+  @Override
+  public int getState(String masterName, TajoConf conf) throws ServiceTrackerException {
     String targetMaster = masterName.replaceAll(":", "_");
     int retValue = -1;
 
@@ -498,12 +539,13 @@ public class HdfsServiceTracker extends HAServiceTracker {
       }
       retValue = -2;
     } catch (Exception e) {
-      e.printStackTrace();
+      throw new ServiceTrackerException("Cannot get HA state - ERROR:" + e.getMessage());
     }
     return retValue;
   }
 
-  public static int formatHA(TajoConf conf) {
+  @Override
+  public int formatHA(TajoConf conf) throws ServiceTrackerException{
     int retValue = -1;
     try {
       FileSystem fs = getFileSystem(conf);
@@ -512,20 +554,20 @@ public class HdfsServiceTracker extends HAServiceTracker {
       Path temPath = null;
 
       int aliveMasterCount = 0;
+
       // Check backup masters
       FileStatus[] files = fs.listStatus(backupPath);
-      for (FileStatus status : files) {
-        temPath = status.getPath();
-        if (isMasterAlive(temPath.getName().replaceAll("_", ":"), conf)) {
+      for (FileStatus eachFile : files) {
+        if (checkConnection(eachFile.getPath().getName(), "_")) {
           aliveMasterCount++;
         }
       }
 
       // Check active master
       files = fs.listStatus(activePath);
-      if (files.length == 1) {
-        temPath = files[0].getPath();
-        if (isMasterAlive(temPath.getName().replaceAll("_", ":"), conf)) {
+      for (FileStatus eachFile : files) {
+        if (!eachFile.getPath().getName().equals(HAConstants.ACTIVE_LOCK_FILE) &&
+            checkConnection(eachFile.getPath().getName(), "_")) {
           aliveMasterCount++;
         }
       }
@@ -539,13 +581,13 @@ public class HdfsServiceTracker extends HAServiceTracker {
       fs.delete(TajoConf.getSystemHADir(conf), true);
       retValue = 1;
     } catch (Exception e) {
-      e.printStackTrace();
+      throw new ServiceTrackerException("Cannot format HA directories - ERROR:" + e.getMessage());
     }
     return retValue;
   }
 
-
-  public static List<String> getMasters(TajoConf conf) {
+  @Override
+  public List<String> getMasters(TajoConf conf) throws ServiceTrackerException {
     List<String> list = new ArrayList<String>();
 
     try {
@@ -569,7 +611,7 @@ public class HdfsServiceTracker extends HAServiceTracker {
       }
 
     } catch (Exception e) {
-      e.printStackTrace();
+      throw new ServiceTrackerException("Cannot get master lists - ERROR:" + e.getMessage());
     }
     return list;
   }
@@ -578,4 +620,4 @@ public class HdfsServiceTracker extends HAServiceTracker {
     Path rootPath = TajoConf.getTajoRootDir(conf);
     return rootPath.getFileSystem(conf);
   }
-}
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/tajo/blob/31c4630d/tajo-core/src/main/java/org/apache/tajo/master/TajoMaster.java
----------------------------------------------------------------------
diff --git a/tajo-core/src/main/java/org/apache/tajo/master/TajoMaster.java b/tajo-core/src/main/java/org/apache/tajo/master/TajoMaster.java
index d6ae49c..fb2a160 100644
--- a/tajo-core/src/main/java/org/apache/tajo/master/TajoMaster.java
+++ b/tajo-core/src/main/java/org/apache/tajo/master/TajoMaster.java
@@ -245,11 +245,6 @@ public class TajoMaster extends CompositeService {
     }
   }
 
-  public boolean isActiveMaster() {
-    return (haService != null ? haService.isActiveStatus() : true);
-  }
-
-
   private void checkAndInitializeSystemDirectories() throws IOException {
     // Get Tajo root dir
     this.tajoRootPath = TajoConf.getTajoRootDir(systemConf);
@@ -360,14 +355,18 @@ public class TajoMaster extends CompositeService {
       defaultFS.delete(systemConfPath, false);
     }
 
-    FSDataOutputStream out = FileSystem.create(defaultFS, systemConfPath,
+    // In TajoMaster HA, some master might see LeaseExpiredException because of lease mismatch. Thus,
+    // we need to create below xml file at HdfsServiceTracker::writeSystemConf.
+    if (!systemConf.getBoolVar(TajoConf.ConfVars.TAJO_MASTER_HA_ENABLE)) {
+      FSDataOutputStream out = FileSystem.create(defaultFS, systemConfPath,
         new FsPermission(SYSTEM_CONF_FILE_PERMISSION));
-    try {
-      systemConf.writeXml(out);
-    } finally {
-      out.close();
+      try {
+        systemConf.writeXml(out);
+      } finally {
+        out.close();
+      }
+      defaultFS.setReplication(systemConfPath, (short) systemConf.getIntVar(ConfVars.SYSTEM_CONF_REPLICA_COUNT));
     }
-    defaultFS.setReplication(systemConfPath, (short) systemConf.getIntVar(ConfVars.SYSTEM_CONF_REPLICA_COUNT));
   }
 
   private void checkBaseTBSpaceAndDatabase() throws IOException {

http://git-wip-us.apache.org/repos/asf/tajo/blob/31c4630d/tajo-core/src/main/java/org/apache/tajo/util/JSPUtil.java
----------------------------------------------------------------------
diff --git a/tajo-core/src/main/java/org/apache/tajo/util/JSPUtil.java b/tajo-core/src/main/java/org/apache/tajo/util/JSPUtil.java
index aee2ced..578b15a 100644
--- a/tajo-core/src/main/java/org/apache/tajo/util/JSPUtil.java
+++ b/tajo-core/src/main/java/org/apache/tajo/util/JSPUtil.java
@@ -197,7 +197,7 @@ public class JSPUtil {
     ServiceTracker haService = context.getHAService();
     String activeLabel = "";
     if (haService != null) {
-      if (haService.isActiveStatus()) {
+      if (haService.isActiveMaster()) {
         activeLabel = "<font color='#1e90ff'>(active)</font>";
       } else {
         activeLabel = "<font color='#1e90ff'>(backup)</font>";

http://git-wip-us.apache.org/repos/asf/tajo/blob/31c4630d/tajo-core/src/main/java/org/apache/tajo/worker/TajoWorker.java
----------------------------------------------------------------------
diff --git a/tajo-core/src/main/java/org/apache/tajo/worker/TajoWorker.java b/tajo-core/src/main/java/org/apache/tajo/worker/TajoWorker.java
index b666f80..0cecd73 100644
--- a/tajo-core/src/main/java/org/apache/tajo/worker/TajoWorker.java
+++ b/tajo-core/src/main/java/org/apache/tajo/worker/TajoWorker.java
@@ -40,6 +40,7 @@ import org.apache.tajo.function.FunctionSignature;
 import org.apache.tajo.rpc.RpcClientManager;
 import org.apache.tajo.rpc.RpcConstants;
 import org.apache.tajo.service.ServiceTracker;
+import org.apache.tajo.service.ServiceTrackerException;
 import org.apache.tajo.service.ServiceTrackerFactory;
 import org.apache.tajo.service.TajoMasterInfo;
 import org.apache.tajo.ipc.QueryCoordinatorProtocol.ClusterResourceSummary;
@@ -322,6 +323,7 @@ public class TajoWorker extends CompositeService {
     startJvmPauseMonitor();
 
     tajoMasterInfo = new TajoMasterInfo();
+
     if (systemConf.getBoolVar(TajoConf.ConfVars.TAJO_MASTER_HA_ENABLE)) {
       tajoMasterInfo.setTajoMasterAddress(serviceTracker.getUmbilicalAddress());
       tajoMasterInfo.setWorkerResourceTrackerAddr(serviceTracker.getResourceTrackerAddress());

http://git-wip-us.apache.org/repos/asf/tajo/blob/31c4630d/tajo-core/src/main/resources/webapps/admin/catalogview.jsp
----------------------------------------------------------------------
diff --git a/tajo-core/src/main/resources/webapps/admin/catalogview.jsp b/tajo-core/src/main/resources/webapps/admin/catalogview.jsp
index e014379..43ec5ca 100644
--- a/tajo-core/src/main/resources/webapps/admin/catalogview.jsp
+++ b/tajo-core/src/main/resources/webapps/admin/catalogview.jsp
@@ -30,8 +30,15 @@
 <%@ page import="java.util.Collection" %>
 <%@ page import="java.util.List" %>
 <%@ page import="java.util.Map" %>
+<%@ page import="org.apache.tajo.service.ServiceTracker" %>
+<%@ page import="java.net.InetSocketAddress" %>
 <%
   TajoMaster master = (TajoMaster) StaticHttpServer.getInstance().getAttribute("tajo.info.server.object");
+
+  String[] masterName = master.getMasterName().split(":");
+  InetSocketAddress socketAddress = new InetSocketAddress(masterName[0], Integer.parseInt(masterName[1]));
+  String masterLabel = socketAddress.getAddress().getHostName()+ ":" + socketAddress.getPort();
+
   CatalogService catalog = master.getCatalog();
 
   String catalogType = request.getParameter("type");
@@ -62,7 +69,7 @@
   ServiceTracker haService = master.getContext().getHAService();
   String activeLabel = "";
   if (haService != null) {
-    if (haService.isActiveStatus()) {
+    if (haService.isActiveMaster()) {
       activeLabel = "<font color='#1e90ff'>(active)</font>";
     } else {
       activeLabel = "<font color='#1e90ff'>(backup)</font>";
@@ -80,7 +87,7 @@
 <body>
 <%@ include file="header.jsp"%>
 <div class='contents'>
-  <h2>Tajo Master: <%=master.getMasterName()%> <%=activeLabel%></h2>
+  <h2>Tajo Master: <%=masterLabel%> <%=activeLabel%></h2>
   <hr/>
   <h3>Catalog</h3>
   <div>

http://git-wip-us.apache.org/repos/asf/tajo/blob/31c4630d/tajo-core/src/main/resources/webapps/admin/cluster.jsp
----------------------------------------------------------------------
diff --git a/tajo-core/src/main/resources/webapps/admin/cluster.jsp b/tajo-core/src/main/resources/webapps/admin/cluster.jsp
index 816a144..97ca698 100644
--- a/tajo-core/src/main/resources/webapps/admin/cluster.jsp
+++ b/tajo-core/src/main/resources/webapps/admin/cluster.jsp
@@ -31,9 +31,15 @@
 <%@ page import="org.apache.tajo.webapp.StaticHttpServer" %>
 <%@ page import="java.util.*" %>
 <%@ page import="org.apache.tajo.service.ServiceTracker" %>
+<%@ page import="java.net.InetSocketAddress" %>
 
 <%
   TajoMaster master = (TajoMaster) StaticHttpServer.getInstance().getAttribute("tajo.info.server.object");
+
+  String[] masterName = master.getMasterName().split(":");
+  InetSocketAddress socketAddress = new InetSocketAddress(masterName[0], Integer.parseInt(masterName[1]));
+  String masterLabel = socketAddress.getAddress().getHostName()+ ":" + socketAddress.getPort();
+
   Map<Integer, Worker> workers = master.getContext().getResourceManager().getWorkers();
   List<Integer> wokerKeys = new ArrayList<Integer>(workers.keySet());
   Collections.sort(wokerKeys);
@@ -72,7 +78,7 @@
 
   String activeLabel = "";
   if (haService != null) {
-    if (haService.isActiveStatus()) {
+    if (haService.isActiveMaster()) {
       activeLabel = "<font color='#1e90ff'>(active)</font>";
     } else {
       activeLabel = "<font color='#1e90ff'>(backup)</font>";
@@ -105,7 +111,7 @@
 <body>
 <%@ include file="header.jsp"%>
 <div class='contents'>
-  <h2>Tajo Master: <%=master.getMasterName()%> <%=activeLabel%></h2>
+  <h2>Tajo Master: <%=masterLabel%> <%=activeLabel%></h2>
   <div>Live:<%=numLiveMasters%>, Dead: <%=deadMasterHtml%>, Total: <%=masters.size()%></div>
 <%
   if (masters != null) {

http://git-wip-us.apache.org/repos/asf/tajo/blob/31c4630d/tajo-core/src/main/resources/webapps/admin/index.jsp
----------------------------------------------------------------------
diff --git a/tajo-core/src/main/resources/webapps/admin/index.jsp b/tajo-core/src/main/resources/webapps/admin/index.jsp
index 468fc72..e0cf876 100644
--- a/tajo-core/src/main/resources/webapps/admin/index.jsp
+++ b/tajo-core/src/main/resources/webapps/admin/index.jsp
@@ -35,10 +35,16 @@
 <%@ page import="java.util.Collection" %>
 <%@ page import="java.util.Date" %>
 <%@ page import="java.util.Map" %>
+<%@ page import="java.net.InetSocketAddress" %>
 <%@ page import="org.apache.tajo.service.ServiceTracker" %>
 
 <%
   TajoMaster master = (TajoMaster) StaticHttpServer.getInstance().getAttribute("tajo.info.server.object");
+
+  String[] masterName = master.getMasterName().split(":");
+  InetSocketAddress socketAddress = new InetSocketAddress(masterName[0], Integer.parseInt(masterName[1]));
+  String masterLabel = socketAddress.getAddress().getHostName()+ ":" + socketAddress.getPort();
+
   Map<Integer, Worker> workers = master.getContext().getResourceManager().getWorkers();
   Map<Integer, Worker> inactiveWorkers = master.getContext().getResourceManager().getInactiveWorkers();
 
@@ -83,7 +89,7 @@
 
   String activeLabel = "";
   if (haService != null) {
-    if (haService.isActiveStatus()) {
+    if (haService.isActiveMaster()) {
       activeLabel = "<font color='#1e90ff'>(active)</font>";
     } else {
       activeLabel = "<font color='#1e90ff'>(backup)</font>";
@@ -114,7 +120,7 @@
 <body>
 <%@ include file="header.jsp"%>
 <div class='contents'>
-  <h2>Tajo Master: <%=master.getMasterName()%> <%=activeLabel%></h2>
+  <h2>Tajo Master: <%=masterLabel%> <%=activeLabel%></h2>
   <hr/>
   <h3>Master Status</h3>
   <table border='0'>

http://git-wip-us.apache.org/repos/asf/tajo/blob/31c4630d/tajo-core/src/main/resources/webapps/admin/query.jsp
----------------------------------------------------------------------
diff --git a/tajo-core/src/main/resources/webapps/admin/query.jsp b/tajo-core/src/main/resources/webapps/admin/query.jsp
index 43e7775..ca376bb 100644
--- a/tajo-core/src/main/resources/webapps/admin/query.jsp
+++ b/tajo-core/src/main/resources/webapps/admin/query.jsp
@@ -29,10 +29,15 @@
 <%@ page import="java.util.*" %>
 <%@ page import="org.apache.tajo.util.history.HistoryReader" %>
 <%@ page import="org.apache.tajo.master.QueryInfo" %>
+<%@ page import="java.net.InetSocketAddress" %>
 
 <%
   TajoMaster master = (TajoMaster) StaticHttpServer.getInstance().getAttribute("tajo.info.server.object");
 
+  String[] masterName = master.getMasterName().split(":");
+  InetSocketAddress socketAddress = new InetSocketAddress(masterName[0], Integer.parseInt(masterName[1]));
+  String masterLabel = socketAddress.getAddress().getHostName()+ ":" + socketAddress.getPort();
+
   List<QueryInProgress> runningQueries =
           new ArrayList<QueryInProgress>(master.getContext().getQueryJobManager().getSubmittedQueries());
 
@@ -113,7 +118,7 @@
 <body>
 <%@ include file="header.jsp"%>
 <div class='contents'>
-  <h2>Tajo Master: <%=master.getMasterName()%> <%=JSPUtil.getMasterActiveLabel(master.getContext())%></h2>
+  <h2>Tajo Master: <%=masterLabel%> <%=JSPUtil.getMasterActiveLabel(master.getContext())%></h2>
   <hr/>
   <h3>Running Queries</h3>
 <%

http://git-wip-us.apache.org/repos/asf/tajo/blob/31c4630d/tajo-core/src/main/resources/webapps/admin/query_executor.jsp
----------------------------------------------------------------------
diff --git a/tajo-core/src/main/resources/webapps/admin/query_executor.jsp b/tajo-core/src/main/resources/webapps/admin/query_executor.jsp
index a0f9a0a..1a58583 100644
--- a/tajo-core/src/main/resources/webapps/admin/query_executor.jsp
+++ b/tajo-core/src/main/resources/webapps/admin/query_executor.jsp
@@ -22,14 +22,19 @@
 <%@ page import="org.apache.tajo.service.ServiceTracker" %>
 <%@ page import="org.apache.tajo.webapp.StaticHttpServer" %>
 <%@ page import="javax.xml.ws.Service" %>
+<%@ page import="java.net.InetSocketAddress" %>
 
 <%
   TajoMaster master = (TajoMaster) StaticHttpServer.getInstance().getAttribute("tajo.info.server.object");
 
+  String[] masterName = master.getMasterName().split(":");
+  InetSocketAddress socketAddress = new InetSocketAddress(masterName[0], Integer.parseInt(masterName[1]));
+  String masterLabel = socketAddress.getAddress().getHostName()+ ":" + socketAddress.getPort();
+
   ServiceTracker haService = master.getContext().getHAService();
   String activeLabel = "";
   if (haService != null) {
-      if (haService.isActiveStatus()) {
+      if (haService.isActiveMaster()) {
       activeLabel = "<font color='#1e90ff'>(active)</font>";
     } else {
       activeLabel = "<font color='#1e90ff'>(backup)</font>";
@@ -288,7 +293,7 @@ function getPage() {
 <body>
 <%@ include file="header.jsp"%>
 <div class='contents'>
-  <h2>Tajo Master: <%=master.getMasterName()%> <%=activeLabel%></h2>
+  <h2>Tajo Master: <%=masterLabel%> <%=activeLabel%></h2>
   <hr/>
   <h3>Query</h3>
   Database :  

http://git-wip-us.apache.org/repos/asf/tajo/blob/31c4630d/tajo-core/src/test/java/org/apache/tajo/ha/TestHAServiceHDFSImpl.java
----------------------------------------------------------------------
diff --git a/tajo-core/src/test/java/org/apache/tajo/ha/TestHAServiceHDFSImpl.java b/tajo-core/src/test/java/org/apache/tajo/ha/TestHAServiceHDFSImpl.java
index 6415588..c714749 100644
--- a/tajo-core/src/test/java/org/apache/tajo/ha/TestHAServiceHDFSImpl.java
+++ b/tajo-core/src/test/java/org/apache/tajo/ha/TestHAServiceHDFSImpl.java
@@ -30,6 +30,7 @@ import org.apache.tajo.service.ServiceTracker;
 import org.apache.tajo.service.ServiceTrackerFactory;
 import org.junit.Test;
 
+import static junit.framework.Assert.assertTrue;
 import static junit.framework.TestCase.assertEquals;
 import static org.junit.Assert.*;
 
@@ -68,12 +69,12 @@ public class TestHAServiceHDFSImpl  {
 
       verifySystemDirectories(fs);
 
-      Path backupMasterFile = new Path(backupPath, backupMaster.getMasterName()
-        .replaceAll(":", "_"));
-      assertTrue(fs.exists(backupMasterFile));
+      assertEquals(2, fs.listStatus(activePath).length);
+      assertEquals(1, fs.listStatus(backupPath).length);
 
-      assertTrue(cluster.getMaster().isActiveMaster());
-      assertFalse(backupMaster.isActiveMaster());
+      assertTrue(fs.exists(new Path(activePath, HAConstants.ACTIVE_LOCK_FILE)));
+      assertTrue(fs.exists(new Path(activePath, cluster.getMaster().getMasterName().replaceAll(":", "_"))));
+      assertTrue(fs.exists(new Path(backupPath, backupMaster.getMasterName().replaceAll(":", "_"))));
 
       createDatabaseAndTable();
       verifyDataBaseAndTable();
@@ -81,13 +82,14 @@ public class TestHAServiceHDFSImpl  {
 
       cluster.getMaster().stop();
 
-      Thread.sleep(7000);
-
-      assertFalse(cluster.getMaster().isActiveMaster());
-      assertTrue(backupMaster.isActiveMaster());
-
       client = cluster.newTajoClient();
       verifyDataBaseAndTable();
+
+      assertEquals(2, fs.listStatus(activePath).length);
+      assertEquals(0, fs.listStatus(backupPath).length);
+
+      assertTrue(fs.exists(new Path(activePath, HAConstants.ACTIVE_LOCK_FILE)));
+      assertTrue(fs.exists(new Path(activePath, backupMaster.getMasterName().replaceAll(":", "_"))));
     } finally {
       client.close();
       backupMaster.stop();
@@ -107,11 +109,12 @@ public class TestHAServiceHDFSImpl  {
     conf.setVar(TajoConf.ConfVars.CATALOG_ADDRESS,
       masterAddress + ":" + NetUtils.getFreeSocketPort());
     conf.setVar(TajoConf.ConfVars.TAJO_MASTER_INFO_ADDRESS,
-      masterAddress + ":" + NetUtils.getFreeSocketPort());
+        masterAddress + ":" + NetUtils.getFreeSocketPort());
     conf.setIntVar(TajoConf.ConfVars.REST_SERVICE_PORT,
         NetUtils.getFreeSocketPort());
 
     conf.setBoolVar(TajoConf.ConfVars.TAJO_MASTER_HA_ENABLE, true);
+    conf.setIntVar(TajoConf.ConfVars.TAJO_MASTER_HA_MONITOR_INTERVAL, 1000);
 
     //Client API service RPC Server
     conf.setIntVar(TajoConf.ConfVars.MASTER_SERVICE_RPC_SERVER_WORKER_THREAD_NUM, 2);
@@ -134,9 +137,6 @@ public class TestHAServiceHDFSImpl  {
 
     backupPath = new Path(haPath, TajoConstants.SYSTEM_HA_BACKUP_DIR_NAME);
     assertTrue(fs.exists(backupPath));
-
-    assertEquals(1, fs.listStatus(activePath).length);
-    assertEquals(1, fs.listStatus(backupPath).length);
   }
 
   private void createDatabaseAndTable() throws Exception {


[05/10] tajo git commit: TAJO-1542 Refactoring of HashJoinExecs. (contributed by navis, committed by hyunsik)

Posted by ji...@apache.org.
http://git-wip-us.apache.org/repos/asf/tajo/blob/36a703c5/tajo-core/src/test/java/org/apache/tajo/engine/planner/physical/TestLeftOuterHashJoinExec.java
----------------------------------------------------------------------
diff --git a/tajo-core/src/test/java/org/apache/tajo/engine/planner/physical/TestLeftOuterHashJoinExec.java b/tajo-core/src/test/java/org/apache/tajo/engine/planner/physical/TestLeftOuterHashJoinExec.java
index e20686b..9afc51f 100644
--- a/tajo-core/src/test/java/org/apache/tajo/engine/planner/physical/TestLeftOuterHashJoinExec.java
+++ b/tajo-core/src/test/java/org/apache/tajo/engine/planner/physical/TestLeftOuterHashJoinExec.java
@@ -319,24 +319,17 @@ public class TestLeftOuterHashJoinExec {
     PhysicalExec exec = phyPlanner.createPlan(ctx, plan);
 
     ProjectionExec proj = (ProjectionExec) exec;
-    if (proj.getChild() instanceof NLLeftOuterJoinExec) {
-       //for this small data set this is not likely to happen
-      
-      assertEquals(1, 0);
-    }
-    else{
-       Tuple tuple;
-       int count = 0;
-       int i = 1;
-       exec.init();
-  
-       while ((tuple = exec.next()) != null) {
-         //TODO check contents
-         count = count + 1;
-       }
-       exec.close();
-       assertEquals(5, count);
+    Tuple tuple;
+    int count = 0;
+    int i = 1;
+    exec.init();
+
+    while ((tuple = exec.next()) != null) {
+      //TODO check contents
+      count = count + 1;
     }
+    exec.close();
+    assertEquals(5, count);
   }
 
     @Test
@@ -361,24 +354,17 @@ public class TestLeftOuterHashJoinExec {
     PhysicalExec exec = phyPlanner.createPlan(ctx, plan);
 
     ProjectionExec proj = (ProjectionExec) exec;
-    if (proj.getChild() instanceof NLLeftOuterJoinExec) {
-      //for this small data set this is not likely to happen
-      
-      assertEquals(1, 0);
-    }
-    else{
-       Tuple tuple;
-       int count = 0;
-       int i = 1;
-       exec.init();
-  
-       while ((tuple = exec.next()) != null) {
-         //TODO check contents
-         count = count + 1;
-       }
-       exec.close();
-       assertEquals(7, count);
+    Tuple tuple;
+    int count = 0;
+    int i = 1;
+    exec.init();
+
+    while ((tuple = exec.next()) != null) {
+      //TODO check contents
+      count = count + 1;
     }
+    exec.close();
+    assertEquals(7, count);
   }
 
 
@@ -403,24 +389,17 @@ public class TestLeftOuterHashJoinExec {
     PhysicalExec exec = phyPlanner.createPlan(ctx, plan);
 
     ProjectionExec proj = (ProjectionExec) exec;
-    if (proj.getChild() instanceof NLLeftOuterJoinExec) {
-      //for this small data set this is not likely to happen
-      
-      assertEquals(1, 0);
-    }
-    else{
-       Tuple tuple;
-       int count = 0;
-       int i = 1;
-       exec.init();
-  
-       while ((tuple = exec.next()) != null) {
-         //TODO check contents
-         count = count + 1;
-       }
-       exec.close();
-       assertEquals(7, count);
+    Tuple tuple;
+    int count = 0;
+    int i = 1;
+    exec.init();
+
+    while ((tuple = exec.next()) != null) {
+      //TODO check contents
+      count = count + 1;
     }
+    exec.close();
+    assertEquals(7, count);
   }
 
   
@@ -445,22 +424,15 @@ public class TestLeftOuterHashJoinExec {
     PhysicalExec exec = phyPlanner.createPlan(ctx, plan);
 
     ProjectionExec proj = (ProjectionExec) exec;
-    if (proj.getChild() instanceof NLLeftOuterJoinExec) {
-      //for this small data set this is not likely to happen
-      
-      assertEquals(1, 0);
-    }
-    else{
-       int count = 0;
-       exec.init();
-  
-       while (exec.next() != null) {
-         //TODO check contents
-         count = count + 1;
-       }
-       exec.close();
-       assertEquals(0, count);
+    int count = 0;
+    exec.init();
+
+    while (exec.next() != null) {
+      //TODO check contents
+      count = count + 1;
     }
+    exec.close();
+    assertEquals(0, count);
   }
   
 

http://git-wip-us.apache.org/repos/asf/tajo/blob/36a703c5/tajo-core/src/test/java/org/apache/tajo/engine/planner/physical/TestLeftOuterNLJoinExec.java
----------------------------------------------------------------------
diff --git a/tajo-core/src/test/java/org/apache/tajo/engine/planner/physical/TestLeftOuterNLJoinExec.java b/tajo-core/src/test/java/org/apache/tajo/engine/planner/physical/TestLeftOuterNLJoinExec.java
deleted file mode 100644
index 10fd3d4..0000000
--- a/tajo-core/src/test/java/org/apache/tajo/engine/planner/physical/TestLeftOuterNLJoinExec.java
+++ /dev/null
@@ -1,474 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.tajo.engine.planner.physical;
-
-import org.apache.hadoop.fs.Path;
-import org.apache.tajo.LocalTajoTestingUtility;
-import org.apache.tajo.TajoTestingCluster;
-import org.apache.tajo.algebra.Expr;
-import org.apache.tajo.catalog.*;
-import org.apache.tajo.catalog.proto.CatalogProtos.StoreType;
-import org.apache.tajo.common.TajoDataTypes.Type;
-import org.apache.tajo.conf.TajoConf;
-import org.apache.tajo.datum.Datum;
-import org.apache.tajo.datum.DatumFactory;
-import org.apache.tajo.engine.parser.SQLAnalyzer;
-import org.apache.tajo.plan.LogicalPlanner;
-import org.apache.tajo.engine.planner.PhysicalPlanner;
-import org.apache.tajo.engine.planner.PhysicalPlannerImpl;
-import org.apache.tajo.plan.PlanningException;
-import org.apache.tajo.engine.planner.enforce.Enforcer;
-import org.apache.tajo.plan.logical.LogicalNode;
-import org.apache.tajo.engine.query.QueryContext;
-import org.apache.tajo.storage.*;
-import org.apache.tajo.storage.fragment.FileFragment;
-import org.apache.tajo.util.CommonTestingUtil;
-import org.apache.tajo.util.TUtil;
-import org.apache.tajo.worker.TaskAttemptContext;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
-
-import java.io.IOException;
-
-import static org.apache.tajo.TajoConstants.DEFAULT_DATABASE_NAME;
-import static org.apache.tajo.TajoConstants.DEFAULT_TABLESPACE_NAME;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNull;
-
-public class TestLeftOuterNLJoinExec {
-  private TajoConf conf;
-  private final String TEST_PATH = TajoTestingCluster.DEFAULT_TEST_DIRECTORY + "/TestLeftOuterNLJoinExec";
-  private TajoTestingCluster util;
-  private CatalogService catalog;
-  private SQLAnalyzer analyzer;
-  private LogicalPlanner planner;
-  private QueryContext defaultContext;
-  private Path testDir;
-
-  private TableDesc dep3;
-  private TableDesc job3;
-  private TableDesc emp3;
-  private TableDesc phone3;
-
-  private final String DEP3_NAME = CatalogUtil.buildFQName(DEFAULT_DATABASE_NAME, "dep3");
-  private final String JOB3_NAME = CatalogUtil.buildFQName(DEFAULT_DATABASE_NAME, "job3");
-  private final String EMP3_NAME = CatalogUtil.buildFQName(DEFAULT_DATABASE_NAME, "emp3");
-  private final String PHONE3_NAME = CatalogUtil.buildFQName(DEFAULT_DATABASE_NAME, "phone3");
-
-  @Before
-  public void setUp() throws Exception {
-    util = new TajoTestingCluster();
-    catalog = util.startCatalogCluster().getCatalog();
-    testDir = CommonTestingUtil.getTestDir(TEST_PATH);
-    catalog.createTablespace(DEFAULT_TABLESPACE_NAME, testDir.toUri().toString());
-    catalog.createDatabase(DEFAULT_DATABASE_NAME, DEFAULT_TABLESPACE_NAME);
-    conf = util.getConfiguration();
-
-    //----------------- dep3 ------------------------------
-    // dep_id | dep_name  | loc_id
-    //--------------------------------
-    //  0     | dep_0     | 1000
-    //  1     | dep_1     | 1001
-    //  2     | dep_2     | 1002
-    //  3     | dep_3     | 1003
-    //  4     | dep_4     | 1004
-    //  5     | dep_5     | 1005
-    //  6     | dep_6     | 1006
-    //  7     | dep_7     | 1007
-    //  8     | dep_8     | 1008
-    //  9     | dep_9     | 1009
-    Schema dep3Schema = new Schema();
-    dep3Schema.addColumn("dep_id", Type.INT4);
-    dep3Schema.addColumn("dep_name", Type.TEXT);
-    dep3Schema.addColumn("loc_id", Type.INT4);
-
-
-    TableMeta dep3Meta = CatalogUtil.newTableMeta("CSV");
-    Path dep3Path = new Path(testDir, "dep3.csv");
-    Appender appender1 = ((FileStorageManager)StorageManager.getFileStorageManager(conf))
-        .getAppender(dep3Meta, dep3Schema, dep3Path);
-    appender1.init();
-    Tuple tuple = new VTuple(dep3Schema.size());
-    for (int i = 0; i < 10; i++) {
-      tuple.put(new Datum[] { DatumFactory.createInt4(i),
-                    DatumFactory.createText("dept_" + i),
-                    DatumFactory.createInt4(1000 + i) });
-      appender1.addTuple(tuple);
-    }
-
-    appender1.flush();
-    appender1.close();
-    dep3 = CatalogUtil.newTableDesc(DEP3_NAME, dep3Schema, dep3Meta, dep3Path);
-    catalog.createTable(dep3);
-
-    //----------------- job3 ------------------------------
-    //  job_id  | job_title
-    // ----------------------
-    //   101    |  job_101
-    //   102    |  job_102
-    //   103    |  job_103
-
-    Schema job3Schema = new Schema();
-    job3Schema.addColumn("job_id", Type.INT4);
-    job3Schema.addColumn("job_title", Type.TEXT);
-
-
-    TableMeta job3Meta = CatalogUtil.newTableMeta("CSV");
-    Path job3Path = new Path(testDir, "job3.csv");
-    Appender appender2 = ((FileStorageManager)StorageManager.getFileStorageManager(conf))
-        .getAppender(job3Meta, job3Schema, job3Path);
-    appender2.init();
-    Tuple tuple2 = new VTuple(job3Schema.size());
-    for (int i = 1; i < 4; i++) {
-      int x = 100 + i;
-      tuple2.put(new Datum[] { DatumFactory.createInt4(100 + i),
-                    DatumFactory.createText("job_" + x) });
-      appender2.addTuple(tuple2);
-    }
-
-    appender2.flush();
-    appender2.close();
-    job3 = CatalogUtil.newTableDesc(JOB3_NAME, job3Schema, job3Meta, job3Path);
-    catalog.createTable(job3);
-
-
-
-    //---------------------emp3 --------------------
-    // emp_id  | first_name | last_name | dep_id | salary | job_id
-    // ------------------------------------------------------------
-    //  11     |  fn_11     |  ln_11    |  1     | 123    | 101
-    //  13     |  fn_13     |  ln_13    |  3     | 369    | 103
-    //  15     |  fn_15     |  ln_15    |  5     | 615    | null
-    //  17     |  fn_17     |  ln_17    |  7     | 861    | null
-    //  19     |  fn_19     |  ln_19    |  9     | 1107   | null
-    //  21     |  fn_21     |  ln_21    |  1     | 123    | 101
-    //  23     |  fn_23     |  ln_23    |  3     | 369    | 103
-
-    Schema emp3Schema = new Schema();
-    emp3Schema.addColumn("emp_id", Type.INT4);
-    emp3Schema.addColumn("first_name", Type.TEXT);
-    emp3Schema.addColumn("last_name", Type.TEXT);
-    emp3Schema.addColumn("dep_id", Type.INT4);
-    emp3Schema.addColumn("salary", Type.FLOAT4);
-    emp3Schema.addColumn("job_id", Type.INT4);
-
-
-    TableMeta emp3Meta = CatalogUtil.newTableMeta("CSV");
-    Path emp3Path = new Path(testDir, "emp3.csv");
-    Appender appender3 = ((FileStorageManager)StorageManager.getFileStorageManager(conf))
-        .getAppender(emp3Meta, emp3Schema, emp3Path);
-    appender3.init();
-    Tuple tuple3 = new VTuple(emp3Schema.size());
-
-    for (int i = 1; i < 4; i += 2) {
-      int x = 10 + i;
-      tuple3.put(new Datum[] { DatumFactory.createInt4(10 + i),
-          DatumFactory.createText("firstname_" + x),
-          DatumFactory.createText("lastname_" + x),
-          DatumFactory.createInt4(i),
-          DatumFactory.createFloat4(123 * i),
-          DatumFactory.createInt4(100 + i) });
-      appender3.addTuple(tuple3);
-
-      int y = 20 + i;
-      tuple3.put(new Datum[] { DatumFactory.createInt4(20 + i),
-          DatumFactory.createText("firstname_" + y),
-          DatumFactory.createText("lastname_" + y),
-          DatumFactory.createInt4(i),
-          DatumFactory.createFloat4(123 * i),
-          DatumFactory.createInt4(100 + i) });
-      appender3.addTuple(tuple3);
-    }
-
-    for (int i = 5; i < 10; i += 2) {
-      int x = 10 + i;
-      tuple3.put(new Datum[] { DatumFactory.createInt4(10 + i),
-          DatumFactory.createText("firstname_" + x),
-          DatumFactory.createText("lastname_" + x),
-          DatumFactory.createInt4(i),
-          DatumFactory.createFloat4(123 * i),
-          DatumFactory.createNullDatum() });
-      appender3.addTuple(tuple3);
-    }
-
-    appender3.flush();
-    appender3.close();
-    emp3 = CatalogUtil.newTableDesc(EMP3_NAME, emp3Schema, emp3Meta, emp3Path);
-    catalog.createTable(emp3);
-
-    // ---------------------phone3 --------------------
-    // emp_id  | phone_number
-    // -----------------------------------------------
-    // this table is empty, no rows
-
-    Schema phone3Schema = new Schema();
-    phone3Schema.addColumn("emp_id", Type.INT4);
-    phone3Schema.addColumn("phone_number", Type.TEXT);
-
-
-    TableMeta phone3Meta = CatalogUtil.newTableMeta("CSV");
-    Path phone3Path = new Path(testDir, "phone3.csv");
-    Appender appender5 = ((FileStorageManager)StorageManager.getFileStorageManager(conf))
-        .getAppender(phone3Meta, phone3Schema, phone3Path);
-    appender5.init();
-    
-    appender5.flush();
-    appender5.close();
-    phone3 = CatalogUtil.newTableDesc(PHONE3_NAME, phone3Schema, phone3Meta, phone3Path);
-    catalog.createTable(phone3);
-
-    analyzer = new SQLAnalyzer();
-    planner = new LogicalPlanner(catalog);
-
-    defaultContext = LocalTajoTestingUtility.createDummyContext(conf);
-  }
-
-  @After
-  public void tearDown() throws Exception {
-    util.shutdownCatalogCluster();
-  }
-  
-  String[] QUERIES = {
-      "select dep3.dep_id, dep_name, emp_id, salary from dep3 left outer join emp3 on dep3.dep_id = emp3.dep_id", //0 no nulls
-      "select job3.job_id, job_title, emp_id, salary from job3 left outer join emp3 on job3.job_id=emp3.job_id", //1 nulls on the right operand
-      "select job3.job_id, job_title, emp_id, salary from emp3 left outer join job3 on job3.job_id=emp3.job_id", //2 nulls on the left side
-      "select emp3.emp_id, first_name, phone_number from emp3 left outer join phone3 on emp3.emp_id = phone3.emp_id", //3 one operand is empty
-      "select phone_number, emp3.emp_id, first_name from phone3 left outer join emp3 on emp3.emp_id = phone3.emp_id" //4 one operand is empty
-  };
-
-  @Test
-  public final void testLeftOuterNLJoinExec0() throws IOException, PlanningException {
-    FileFragment[] dep3Frags = FileStorageManager.splitNG(conf, DEP3_NAME, dep3.getMeta(), new Path(dep3.getPath()),
-        Integer.MAX_VALUE);
-    FileFragment[] emp3Frags = FileStorageManager.splitNG(conf, EMP3_NAME, emp3.getMeta(), new Path(emp3.getPath()),
-        Integer.MAX_VALUE);
-
-    FileFragment[] merged = TUtil.concat(dep3Frags, emp3Frags);
-
-    Path workDir = CommonTestingUtil.getTestDir(TajoTestingCluster.DEFAULT_TEST_DIRECTORY + "/TestLeftOuterNLJoinExec0");
-    TaskAttemptContext ctx = new TaskAttemptContext(new QueryContext(conf),
-        LocalTajoTestingUtility.newTaskAttemptId(), merged, workDir);
-    ctx.setEnforcer(new Enforcer());
-    Expr context =  analyzer.parse(QUERIES[0]);
-    LogicalNode plan = planner.createPlan(defaultContext, context).getRootBlock().getRoot();
-
-
-    PhysicalPlanner phyPlanner = new PhysicalPlannerImpl(conf);
-    PhysicalExec exec = phyPlanner.createPlan(ctx, plan);
-
-    //maybe plan results with hash join exec algorithm usage. Must convert from HashLeftOuterJoinExec into NLLeftOuterJoinExec
-    ProjectionExec proj = (ProjectionExec) exec;
-    if (proj.getChild() instanceof HashLeftOuterJoinExec) {
-      HashLeftOuterJoinExec join = proj.getChild();
-      NLLeftOuterJoinExec aJoin = new NLLeftOuterJoinExec(ctx, join.getPlan(), join.getLeftChild(), join.getRightChild());
-      proj.setChild(aJoin);
-      exec = proj;
-    }
-
-    int count = 0;
-    exec.init();
-    while (exec.next() != null) {
-       //TODO check contents
-         count = count + 1;
-    }
-    assertNull(exec.next());
-    exec.close();
-    assertEquals(12, count);
-  }
-
-
-  @Test
-  public final void testLeftOuterNLJoinExec1() throws IOException, PlanningException {
-    FileFragment[] job3Frags = FileStorageManager.splitNG(conf, JOB3_NAME, job3.getMeta(), new Path(job3.getPath()),
-        Integer.MAX_VALUE);
-    FileFragment[] emp3Frags = FileStorageManager.splitNG(conf, EMP3_NAME, emp3.getMeta(), new Path(emp3.getPath()),
-        Integer.MAX_VALUE);
-
-    FileFragment[] merged = TUtil.concat(job3Frags, emp3Frags);
-
-
-    Path workDir = CommonTestingUtil.getTestDir(TajoTestingCluster.DEFAULT_TEST_DIRECTORY + "/TestLeftOuter_NLJoinExec1");
-    TaskAttemptContext ctx = new TaskAttemptContext(new QueryContext(conf),
-        LocalTajoTestingUtility.newTaskAttemptId(), merged, workDir);
-    ctx.setEnforcer(new Enforcer());
-    Expr context =  analyzer.parse(QUERIES[1]);
-    LogicalNode plan = planner.createPlan(defaultContext, context).getRootBlock().getRoot();
-
-
-    PhysicalPlanner phyPlanner = new PhysicalPlannerImpl(conf);
-    PhysicalExec exec = phyPlanner.createPlan(ctx, plan);
-    
-    //maybe plan results with hash join exec algorithm usage. Must convert from HashLeftOuterJoinExec into NLLeftOuterJoinExec
-    ProjectionExec proj = (ProjectionExec) exec;
-    if (proj.getChild() instanceof HashLeftOuterJoinExec) {
-      HashLeftOuterJoinExec join = proj.getChild();
-      NLLeftOuterJoinExec aJoin = new NLLeftOuterJoinExec(ctx, join.getPlan(), join.getLeftChild(), join.getRightChild());
-      proj.setChild(aJoin);
-      exec = proj;
-     
-    }
-
-
-    Tuple tuple;
-    int i = 1;
-    int count = 0;
-    exec.init();
-    while ((tuple = exec.next()) != null) {
-       //TODO check contents
-         count = count + 1;
-      
-    }
-    exec.close();
-    assertEquals(5, count);
-  }
-
-  @Test
-  public final void testLeftOuter_NLJoinExec2() throws IOException, PlanningException {
-    FileFragment[] emp3Frags = FileStorageManager.splitNG(conf, EMP3_NAME, emp3.getMeta(), new Path(emp3.getPath()),
-        Integer.MAX_VALUE);
-    FileFragment[] job3Frags = FileStorageManager.splitNG(conf, JOB3_NAME, job3.getMeta(), new Path(job3.getPath()),
-        Integer.MAX_VALUE);
-
-    FileFragment[] merged = TUtil.concat(emp3Frags, job3Frags);
-
-    Path workDir = CommonTestingUtil.getTestDir(TajoTestingCluster.DEFAULT_TEST_DIRECTORY + "/TestLeftOuter_NLJoinExec2");
-    TaskAttemptContext ctx = new TaskAttemptContext(new QueryContext(conf),
-        LocalTajoTestingUtility.newTaskAttemptId(), merged, workDir);
-    ctx.setEnforcer(new Enforcer());
-    Expr context =  analyzer.parse(QUERIES[2]);
-    LogicalNode plan = planner.createPlan(defaultContext, context).getRootBlock().getRoot();
-
-
-    PhysicalPlanner phyPlanner = new PhysicalPlannerImpl(conf);
-    PhysicalExec exec = phyPlanner.createPlan(ctx, plan);
-    
-    //maybe plan results with hash join exec algorithm usage. Must convert from HashLeftOuterJoinExec into NLLeftOuterJoinExec
-    ProjectionExec proj = (ProjectionExec) exec;
-    if (proj.getChild() instanceof HashLeftOuterJoinExec) {
-      HashLeftOuterJoinExec join = proj.getChild();
-      NLLeftOuterJoinExec aJoin = new NLLeftOuterJoinExec(ctx, join.getPlan(), join.getLeftChild(), join.getRightChild());
-      proj.setChild(aJoin);
-      exec = proj;
-     
-    }
-
-
-    Tuple tuple;
-    int i = 1;
-    int count = 0;
-    exec.init();
-    while ((tuple = exec.next()) != null) {
-       //TODO check contents
-         count = count + 1;
-      
-    }
-    exec.close();
-    assertEquals(7, count);
-  }
-
-
-  @Test
-  public final void testLeftOuter_NLJoinExec3() throws IOException, PlanningException {
-    FileFragment[] emp3Frags = FileStorageManager.splitNG(conf, EMP3_NAME, emp3.getMeta(), new Path(emp3.getPath()),
-        Integer.MAX_VALUE);
-    FileFragment[] phone3Frags = FileStorageManager.splitNG(conf, PHONE3_NAME, phone3.getMeta(), new Path(phone3.getPath()),
-        Integer.MAX_VALUE);
-
-    FileFragment[] merged = TUtil.concat(emp3Frags, phone3Frags);
-
-    Path workDir = CommonTestingUtil.getTestDir(TajoTestingCluster.DEFAULT_TEST_DIRECTORY + "/TestLeftOuter_NLJoinExec3");
-    TaskAttemptContext ctx = new TaskAttemptContext(new QueryContext(conf),
-        LocalTajoTestingUtility.newTaskAttemptId(), merged, workDir);
-    ctx.setEnforcer(new Enforcer());
-    Expr context =  analyzer.parse(QUERIES[3]);
-    LogicalNode plan = planner.createPlan(defaultContext, context).getRootBlock().getRoot();
-
-
-    PhysicalPlanner phyPlanner = new PhysicalPlannerImpl(conf);
-    PhysicalExec exec = phyPlanner.createPlan(ctx, plan);
-    
-    //maybe plan results with hash join exec algorithm usage. Must convert from HashLeftOuterJoinExec into NLLeftOuterJoinExec
-    ProjectionExec proj = (ProjectionExec) exec;
-    if (proj.getChild() instanceof HashLeftOuterJoinExec) {
-      HashLeftOuterJoinExec join = proj.getChild();
-      NLLeftOuterJoinExec aJoin = new NLLeftOuterJoinExec(ctx, join.getPlan(), join.getLeftChild(), join.getRightChild());
-      proj.setChild(aJoin);
-      exec = proj;
-     
-    }
-
-
-    Tuple tuple;
-    int i = 1;
-    int count = 0;
-    exec.init();
-    while ((tuple = exec.next()) != null) {
-       //TODO check contents
-         count = count + 1;
-      
-    }
-    exec.close();
-    assertEquals(7, count);
-  }
-
-    @Test
-  public final void testLeftOuter_NLJoinExec4() throws IOException, PlanningException {
-    FileFragment[] emp3Frags = FileStorageManager.splitNG(conf, EMP3_NAME, emp3.getMeta(), new Path(emp3.getPath()),
-        Integer.MAX_VALUE);
-    FileFragment[] phone3Frags = FileStorageManager.splitNG(conf, PHONE3_NAME, phone3.getMeta(), new Path(phone3.getPath()),
-        Integer.MAX_VALUE);
-
-    FileFragment[] merged = TUtil.concat(phone3Frags, emp3Frags);
-
-    Path workDir = CommonTestingUtil.getTestDir(TajoTestingCluster.DEFAULT_TEST_DIRECTORY + "/TestLeftOuter_NLJoinExec4");
-    TaskAttemptContext ctx = new TaskAttemptContext(new QueryContext(conf),
-        LocalTajoTestingUtility.newTaskAttemptId(), merged, workDir);
-    ctx.setEnforcer(new Enforcer());
-    Expr context =  analyzer.parse(QUERIES[4]);
-    LogicalNode plan = planner.createPlan(defaultContext, context).getRootBlock().getRoot();
-
-
-    PhysicalPlanner phyPlanner = new PhysicalPlannerImpl(conf);
-    PhysicalExec exec = phyPlanner.createPlan(ctx, plan);
-    
-    //maybe plan results with hash join exec algorithm usage. Must convert from HashLeftOuterJoinExec into NLLeftOuterJoinExec
-    ProjectionExec proj = (ProjectionExec) exec;
-    if (proj.getChild() instanceof HashLeftOuterJoinExec) {
-      HashLeftOuterJoinExec join = proj.getChild();
-      NLLeftOuterJoinExec aJoin = new NLLeftOuterJoinExec(ctx, join.getPlan(), join.getLeftChild(), join.getRightChild());
-      proj.setChild(aJoin);
-      exec = proj;
-     
-    }
-
-
-    Tuple tuple;
-    int i = 1;
-    int count = 0;
-    exec.init();
-    while ((tuple = exec.next()) != null) {
-       //TODO check contents
-         count = count + 1;
-      
-    }
-    exec.close();
-    assertEquals(0, count);
-  }
-}

http://git-wip-us.apache.org/repos/asf/tajo/blob/36a703c5/tajo-core/src/test/resources/queries/TestJoinQuery/testJoinFilterOfRowPreservedTable1.sql
----------------------------------------------------------------------
diff --git a/tajo-core/src/test/resources/queries/TestJoinQuery/testJoinFilterOfRowPreservedTable1.sql b/tajo-core/src/test/resources/queries/TestJoinQuery/testJoinFilterOfRowPreservedTable1.sql
index 66274d7..50ea371 100644
--- a/tajo-core/src/test/resources/queries/TestJoinQuery/testJoinFilterOfRowPreservedTable1.sql
+++ b/tajo-core/src/test/resources/queries/TestJoinQuery/testJoinFilterOfRowPreservedTable1.sql
@@ -5,4 +5,4 @@ select
   n_regionkey
 from
   region left outer join nation on n_regionkey = r_regionkey and r_name in ('AMERICA', 'ASIA')
-order by r_name;
\ No newline at end of file
+order by r_name,n_name;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/tajo/blob/36a703c5/tajo-core/src/test/resources/results/TestJoinQuery/testJoinFilterOfRowPreservedTable1.result
----------------------------------------------------------------------
diff --git a/tajo-core/src/test/resources/results/TestJoinQuery/testJoinFilterOfRowPreservedTable1.result b/tajo-core/src/test/resources/results/TestJoinQuery/testJoinFilterOfRowPreservedTable1.result
index 82d5562..d489e3e 100644
--- a/tajo-core/src/test/resources/results/TestJoinQuery/testJoinFilterOfRowPreservedTable1.result
+++ b/tajo-core/src/test/resources/results/TestJoinQuery/testJoinFilterOfRowPreservedTable1.result
@@ -6,10 +6,10 @@ AMERICA,1,BRAZIL,1
 AMERICA,1,CANADA,1
 AMERICA,1,PERU,1
 AMERICA,1,UNITED STATES,1
+ASIA,2,CHINA,2
 ASIA,2,INDIA,2
 ASIA,2,INDONESIA,2
 ASIA,2,JAPAN,2
-ASIA,2,CHINA,2
 ASIA,2,VIETNAM,2
 EUROPE,3,null,null
 MIDDLE EAST,4,null,null
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/tajo/blob/36a703c5/tajo-plan/src/main/java/org/apache/tajo/plan/expr/AggregationFunctionCallEval.java
----------------------------------------------------------------------
diff --git a/tajo-plan/src/main/java/org/apache/tajo/plan/expr/AggregationFunctionCallEval.java b/tajo-plan/src/main/java/org/apache/tajo/plan/expr/AggregationFunctionCallEval.java
index cfcc829..33d6565 100644
--- a/tajo-plan/src/main/java/org/apache/tajo/plan/expr/AggregationFunctionCallEval.java
+++ b/tajo-plan/src/main/java/org/apache/tajo/plan/expr/AggregationFunctionCallEval.java
@@ -78,7 +78,7 @@ public class AggregationFunctionCallEval extends FunctionEval implements Cloneab
   }
 
   public void merge(FunctionContext context, Tuple tuple) {
-    if (!isBinded) {
+    if (!isBound) {
       throw new IllegalStateException("bind() must be called before merge()");
     }
     mergeParam(context, evalParams(tuple));
@@ -99,7 +99,7 @@ public class AggregationFunctionCallEval extends FunctionEval implements Cloneab
   }
 
   public Datum terminate(FunctionContext context) {
-    if (!isBinded) {
+    if (!isBound) {
       throw new IllegalStateException("bind() must be called before terminate()");
     }
     if (!finalPhase) {

http://git-wip-us.apache.org/repos/asf/tajo/blob/36a703c5/tajo-plan/src/main/java/org/apache/tajo/plan/expr/AlgebraicUtil.java
----------------------------------------------------------------------
diff --git a/tajo-plan/src/main/java/org/apache/tajo/plan/expr/AlgebraicUtil.java b/tajo-plan/src/main/java/org/apache/tajo/plan/expr/AlgebraicUtil.java
index 6cf7272..c6b7354 100644
--- a/tajo-plan/src/main/java/org/apache/tajo/plan/expr/AlgebraicUtil.java
+++ b/tajo-plan/src/main/java/org/apache/tajo/plan/expr/AlgebraicUtil.java
@@ -21,6 +21,7 @@ package org.apache.tajo.plan.expr;
 import org.apache.tajo.catalog.Column;
 
 import java.util.ArrayList;
+import java.util.Collection;
 import java.util.List;
 import java.util.Map;
 import java.util.Stack;
@@ -327,6 +328,10 @@ public class AlgebraicUtil {
         (expr.getType() == EvalType.LIKE && !((LikePredicateEval)expr).isLeadingWildCard());
   }
 
+  public static EvalNode createSingletonExprFromCNF(Collection<EvalNode> cnfExprs) {
+    return createSingletonExprFromCNF(cnfExprs.toArray(new EvalNode[cnfExprs.size()]));
+  }
+
   /**
    * Convert a list of conjunctive normal forms into a singleton expression.
    *

http://git-wip-us.apache.org/repos/asf/tajo/blob/36a703c5/tajo-plan/src/main/java/org/apache/tajo/plan/expr/EvalNode.java
----------------------------------------------------------------------
diff --git a/tajo-plan/src/main/java/org/apache/tajo/plan/expr/EvalNode.java b/tajo-plan/src/main/java/org/apache/tajo/plan/expr/EvalNode.java
index 9abb0bc..b154532 100644
--- a/tajo-plan/src/main/java/org/apache/tajo/plan/expr/EvalNode.java
+++ b/tajo-plan/src/main/java/org/apache/tajo/plan/expr/EvalNode.java
@@ -35,43 +35,44 @@ import org.apache.tajo.storage.Tuple;
  * It is also used for evaluation.
  */
 public abstract class EvalNode implements Cloneable, GsonObject, ProtoObject<PlanProto.EvalNodeTree> {
-	@Expose protected EvalType type;
-  protected boolean isBinded = false;
+  @Expose
+  protected EvalType type;
+  protected transient boolean isBound;
 
   public EvalNode() {
   }
 
-	public EvalNode(EvalType type) {
-		this.type = type;
-	}
-	
-	public EvalType getType() {
-		return this.type;
-	}
-	
-	public abstract DataType getValueType();
+  public EvalNode(EvalType type) {
+    this.type = type;
+  }
+
+  public EvalType getType() {
+    return this.type;
+  }
+
+  public abstract DataType getValueType();
 
   public abstract int childNum();
 
   public abstract EvalNode getChild(int idx);
-	
-	public abstract String getName();
+
+  public abstract String getName();
 
   @Override
-	public String toJson() {
+  public String toJson() {
     return PlanGsonHelper.toJson(this, EvalNode.class);
-	}
+  }
 
   public EvalNode bind(@Nullable EvalContext evalContext, Schema schema) {
     for (int i = 0; i < childNum(); i++) {
       getChild(i).bind(evalContext, schema);
     }
-    isBinded = true;
+    isBound = true;
     return this;
   }
 
-	public <T extends Datum> T eval(Tuple tuple) {
-    if (!isBinded) {
+  public <T extends Datum> T eval(Tuple tuple) {
+    if (!isBound) {
       throw new IllegalStateException("bind() must be called before eval()");
     }
     return null;
@@ -87,7 +88,7 @@ public abstract class EvalNode implements Cloneable, GsonObject, ProtoObject<Pla
   public Object clone() throws CloneNotSupportedException {
     EvalNode evalNode = (EvalNode) super.clone();
     evalNode.type = type;
-    evalNode.isBinded = isBinded;
+    evalNode.isBound = isBound;
     return evalNode;
   }
 

http://git-wip-us.apache.org/repos/asf/tajo/blob/36a703c5/tajo-plan/src/main/java/org/apache/tajo/plan/expr/InEval.java
----------------------------------------------------------------------
diff --git a/tajo-plan/src/main/java/org/apache/tajo/plan/expr/InEval.java b/tajo-plan/src/main/java/org/apache/tajo/plan/expr/InEval.java
index c968bda..7052663 100644
--- a/tajo-plan/src/main/java/org/apache/tajo/plan/expr/InEval.java
+++ b/tajo-plan/src/main/java/org/apache/tajo/plan/expr/InEval.java
@@ -58,7 +58,7 @@ public class InEval extends BinaryEval {
 
   @Override
   public Datum eval(Tuple tuple) {
-    if (!isBinded) {
+    if (!isBound) {
       throw new IllegalStateException("bind() must be called before eval()");
     }
     if (values == null) {

http://git-wip-us.apache.org/repos/asf/tajo/blob/36a703c5/tajo-plan/src/main/java/org/apache/tajo/plan/expr/PatternMatchPredicateEval.java
----------------------------------------------------------------------
diff --git a/tajo-plan/src/main/java/org/apache/tajo/plan/expr/PatternMatchPredicateEval.java b/tajo-plan/src/main/java/org/apache/tajo/plan/expr/PatternMatchPredicateEval.java
index cdd8dfb..ec143f7 100644
--- a/tajo-plan/src/main/java/org/apache/tajo/plan/expr/PatternMatchPredicateEval.java
+++ b/tajo-plan/src/main/java/org/apache/tajo/plan/expr/PatternMatchPredicateEval.java
@@ -82,7 +82,7 @@ public abstract class PatternMatchPredicateEval extends BinaryEval {
 
   @Override
   public Datum eval(Tuple tuple) {
-    if (!isBinded) {
+    if (!isBound) {
       throw new IllegalStateException("bind() must be called before eval()");
     }
     Datum predicand = leftExpr.eval(tuple);

http://git-wip-us.apache.org/repos/asf/tajo/blob/36a703c5/tajo-plan/src/main/java/org/apache/tajo/plan/expr/WindowFunctionEval.java
----------------------------------------------------------------------
diff --git a/tajo-plan/src/main/java/org/apache/tajo/plan/expr/WindowFunctionEval.java b/tajo-plan/src/main/java/org/apache/tajo/plan/expr/WindowFunctionEval.java
index a39d303..e5b88f2 100644
--- a/tajo-plan/src/main/java/org/apache/tajo/plan/expr/WindowFunctionEval.java
+++ b/tajo-plan/src/main/java/org/apache/tajo/plan/expr/WindowFunctionEval.java
@@ -64,7 +64,7 @@ public class WindowFunctionEval extends AggregationFunctionCallEval implements C
 
   @Override
   public Datum terminate(FunctionContext context) {
-    if (!isBinded) {
+    if (!isBound) {
       throw new IllegalStateException("bind() must be called before terminate()");
     }
     return functionInvoke.terminate(context);

http://git-wip-us.apache.org/repos/asf/tajo/blob/36a703c5/tajo-storage/tajo-storage-common/src/main/java/org/apache/tajo/storage/FrameTuple.java
----------------------------------------------------------------------
diff --git a/tajo-storage/tajo-storage-common/src/main/java/org/apache/tajo/storage/FrameTuple.java b/tajo-storage/tajo-storage-common/src/main/java/org/apache/tajo/storage/FrameTuple.java
index 8b7e2e0..a5561ed 100644
--- a/tajo-storage/tajo-storage-common/src/main/java/org/apache/tajo/storage/FrameTuple.java
+++ b/tajo-storage/tajo-storage-common/src/main/java/org/apache/tajo/storage/FrameTuple.java
@@ -30,7 +30,7 @@ import org.apache.tajo.exception.UnsupportedException;
 /**
  * An instance of FrameTuple is an immutable tuple.
  * It contains two tuples and pretends to be one instance of Tuple for
- * join qual evaluatations.
+ * join qual evaluations.
  */
 public class FrameTuple implements Tuple, Cloneable {
   private int size;
@@ -52,6 +52,18 @@ public class FrameTuple implements Tuple, Cloneable {
     this.right = right;
   }
 
+  public FrameTuple setLeft(Tuple left) {
+    this.left = left;
+    this.leftSize = left.size();
+    return this;
+  }
+
+  public FrameTuple setRight(Tuple right) {
+    this.right = right;
+    this.size = leftSize + right.size();
+    return this;
+  }
+
   @Override
   public int size() {
     return size;


[07/10] tajo git commit: TAJO-1603: Refactor StorageManager. (hyunsik)

Posted by ji...@apache.org.
http://git-wip-us.apache.org/repos/asf/tajo/blob/5491f0e7/tajo-storage/tajo-storage-hdfs/src/test/java/org/apache/tajo/storage/TestStorages.java
----------------------------------------------------------------------
diff --git a/tajo-storage/tajo-storage-hdfs/src/test/java/org/apache/tajo/storage/TestStorages.java b/tajo-storage/tajo-storage-hdfs/src/test/java/org/apache/tajo/storage/TestStorages.java
index 6a0080c..286902a 100644
--- a/tajo-storage/tajo-storage-hdfs/src/test/java/org/apache/tajo/storage/TestStorages.java
+++ b/tajo-storage/tajo-storage-hdfs/src/test/java/org/apache/tajo/storage/TestStorages.java
@@ -155,7 +155,7 @@ public class TestStorages {
 
       TableMeta meta = CatalogUtil.newTableMeta(storeType);
       Path tablePath = new Path(testDir, "Splitable.data");
-      FileStorageManager sm = (FileStorageManager)StorageManager.getFileStorageManager(conf);
+      FileStorageManager sm = (FileStorageManager) TableSpaceManager.getFileStorageManager(conf);
       Appender appender = sm.getAppender(meta, schema, tablePath);
       appender.enableStats();
       appender.init();
@@ -210,7 +210,7 @@ public class TestStorages {
 
       TableMeta meta = CatalogUtil.newTableMeta(storeType);
       Path tablePath = new Path(testDir, "Splitable.data");
-      FileStorageManager sm = (FileStorageManager)StorageManager.getFileStorageManager(conf);
+      FileStorageManager sm = (FileStorageManager) TableSpaceManager.getFileStorageManager(conf);
       Appender appender = sm.getAppender(meta, schema, tablePath);
       appender.enableStats();
       appender.init();
@@ -271,7 +271,7 @@ public class TestStorages {
     }
 
     Path tablePath = new Path(testDir, "testProjection.data");
-    FileStorageManager sm = (FileStorageManager)StorageManager.getFileStorageManager(conf);
+    FileStorageManager sm = (FileStorageManager) TableSpaceManager.getFileStorageManager(conf);
     Appender appender = sm.getAppender(meta, schema, tablePath);
     appender.init();
     int tupleNum = 10000;
@@ -347,7 +347,7 @@ public class TestStorages {
       meta.putOption(StorageConstants.AVRO_SCHEMA_URL, path);
     }
 
-    FileStorageManager sm = (FileStorageManager)StorageManager.getFileStorageManager(conf);
+    FileStorageManager sm = (FileStorageManager) TableSpaceManager.getFileStorageManager(conf);
     Path tablePath = new Path(testDir, "testVariousTypes.data");
     Appender appender = sm.getAppender(meta, schema, tablePath);
     appender.init();
@@ -425,7 +425,7 @@ public class TestStorages {
     }
 
     Path tablePath = new Path(testDir, "testVariousTypes.data");
-    FileStorageManager sm = (FileStorageManager)StorageManager.getFileStorageManager(conf);
+    FileStorageManager sm = (FileStorageManager) TableSpaceManager.getFileStorageManager(conf);
     Appender appender = sm.getAppender(meta, schema, tablePath);
     appender.init();
 
@@ -469,7 +469,7 @@ public class TestStorages {
 
     FileStatus status = fs.getFileStatus(tablePath);
     FileFragment fragment = new FileFragment("table", tablePath, 0, status.getLen());
-    Scanner scanner = StorageManager.getFileStorageManager(conf).getScanner(meta, schema, fragment);
+    Scanner scanner = TableSpaceManager.getFileStorageManager(conf).getScanner(meta, schema, fragment);
     scanner.init();
 
     Tuple retrieved;
@@ -513,7 +513,7 @@ public class TestStorages {
     meta.putOption(StorageConstants.CSVFILE_SERDE, TextSerializerDeserializer.class.getName());
 
     Path tablePath = new Path(testDir, "testVariousTypes.data");
-    FileStorageManager sm = (FileStorageManager)StorageManager.getFileStorageManager(conf);
+    FileStorageManager sm = (FileStorageManager) TableSpaceManager.getFileStorageManager(conf);
     Appender appender = sm.getAppender(meta, schema, tablePath);
     appender.enableStats();
     appender.init();
@@ -545,7 +545,7 @@ public class TestStorages {
     assertEquals(appender.getStats().getNumBytes().longValue(), status.getLen());
 
     FileFragment fragment = new FileFragment("table", tablePath, 0, status.getLen());
-    Scanner scanner =  StorageManager.getFileStorageManager(conf).getScanner(meta, schema, fragment);
+    Scanner scanner =  TableSpaceManager.getFileStorageManager(conf).getScanner(meta, schema, fragment);
     scanner.init();
 
     Tuple retrieved;
@@ -583,7 +583,7 @@ public class TestStorages {
     meta.putOption(StorageConstants.RCFILE_SERDE, BinarySerializerDeserializer.class.getName());
 
     Path tablePath = new Path(testDir, "testVariousTypes.data");
-    FileStorageManager sm = (FileStorageManager)StorageManager.getFileStorageManager(conf);
+    FileStorageManager sm = (FileStorageManager) TableSpaceManager.getFileStorageManager(conf);
     Appender appender = sm.getAppender(meta, schema, tablePath);
     appender.enableStats();
     appender.init();
@@ -615,7 +615,7 @@ public class TestStorages {
     assertEquals(appender.getStats().getNumBytes().longValue(), status.getLen());
 
     FileFragment fragment = new FileFragment("table", tablePath, 0, status.getLen());
-    Scanner scanner =  StorageManager.getFileStorageManager(conf).getScanner(meta, schema, fragment);
+    Scanner scanner =  TableSpaceManager.getFileStorageManager(conf).getScanner(meta, schema, fragment);
     scanner.init();
 
     Tuple retrieved;
@@ -653,7 +653,7 @@ public class TestStorages {
     meta.putOption(StorageConstants.SEQUENCEFILE_SERDE, TextSerializerDeserializer.class.getName());
 
     Path tablePath = new Path(testDir, "testVariousTypes.data");
-    FileStorageManager sm = (FileStorageManager)StorageManager.getFileStorageManager(conf);
+    FileStorageManager sm = (FileStorageManager) TableSpaceManager.getFileStorageManager(conf);
     Appender appender = sm.getAppender(meta, schema, tablePath);
     appender.enableStats();
     appender.init();
@@ -685,7 +685,7 @@ public class TestStorages {
     assertEquals(appender.getStats().getNumBytes().longValue(), status.getLen());
 
     FileFragment fragment = new FileFragment("table", tablePath, 0, status.getLen());
-    Scanner scanner =  StorageManager.getFileStorageManager(conf).getScanner(meta, schema, fragment);
+    Scanner scanner =  TableSpaceManager.getFileStorageManager(conf).getScanner(meta, schema, fragment);
     scanner.init();
 
     assertTrue(scanner instanceof SequenceFileScanner);
@@ -727,7 +727,7 @@ public class TestStorages {
     meta.putOption(StorageConstants.SEQUENCEFILE_SERDE, BinarySerializerDeserializer.class.getName());
 
     Path tablePath = new Path(testDir, "testVariousTypes.data");
-    FileStorageManager sm = (FileStorageManager)StorageManager.getFileStorageManager(conf);
+    FileStorageManager sm = (FileStorageManager) TableSpaceManager.getFileStorageManager(conf);
     Appender appender = sm.getAppender(meta, schema, tablePath);
     appender.enableStats();
     appender.init();
@@ -759,7 +759,7 @@ public class TestStorages {
     assertEquals(appender.getStats().getNumBytes().longValue(), status.getLen());
 
     FileFragment fragment = new FileFragment("table", tablePath, 0, status.getLen());
-    Scanner scanner = StorageManager.getFileStorageManager(conf).getScanner(meta, schema, fragment);
+    Scanner scanner = TableSpaceManager.getFileStorageManager(conf).getScanner(meta, schema, fragment);
     scanner.init();
 
     assertTrue(scanner instanceof SequenceFileScanner);
@@ -789,7 +789,7 @@ public class TestStorages {
       TableMeta meta = CatalogUtil.newTableMeta(storeType, options);
 
       Path tablePath = new Path(testDir, "testTime.data");
-      FileStorageManager sm = (FileStorageManager)StorageManager.getFileStorageManager(conf);
+      FileStorageManager sm = (FileStorageManager) TableSpaceManager.getFileStorageManager(conf);
       Appender appender = sm.getAppender(meta, schema, tablePath);
       appender.init();
 
@@ -805,7 +805,7 @@ public class TestStorages {
 
       FileStatus status = fs.getFileStatus(tablePath);
       FileFragment fragment = new FileFragment("table", tablePath, 0, status.getLen());
-      Scanner scanner = StorageManager.getFileStorageManager(conf).getScanner(meta, schema, fragment);
+      Scanner scanner = TableSpaceManager.getFileStorageManager(conf).getScanner(meta, schema, fragment);
       scanner.init();
 
       Tuple retrieved;
@@ -831,7 +831,7 @@ public class TestStorages {
 
     TableMeta meta = CatalogUtil.newTableMeta(storeType);
     Path tablePath = new Path(testDir, "Seekable.data");
-    FileStorageManager sm = (FileStorageManager)StorageManager.getFileStorageManager(conf);
+    FileStorageManager sm = (FileStorageManager) TableSpaceManager.getFileStorageManager(conf);
     FileAppender appender = (FileAppender) sm.getAppender(meta, schema, tablePath);
     appender.enableStats();
     appender.init();
@@ -873,7 +873,7 @@ public class TestStorages {
     long readBytes = 0;
     long readRows = 0;
     for (long offset : offsets) {
-      scanner = StorageManager.getFileStorageManager(conf).getScanner(meta, schema,
+      scanner = TableSpaceManager.getFileStorageManager(conf).getScanner(meta, schema,
 	        new FileFragment("table", tablePath, prevOffset, offset - prevOffset), schema);
       scanner.init();
 
@@ -913,7 +913,7 @@ public class TestStorages {
     }
 
     if (storeType.equalsIgnoreCase("RAW")) {
-      StorageManager.clearCache();
+      TableSpaceManager.clearCache();
       /* TAJO-1250 reproduce BufferOverflow of RAWFile */
       int headerSize = 4 + 2 + 1; //Integer record length + Short null-flag length + 1 byte null flags
       /* max varint32: 5 bytes, max varint64: 10 bytes */
@@ -921,7 +921,7 @@ public class TestStorages {
       conf.setInt(RawFile.WRITE_BUFFER_SIZE, record + headerSize);
     }
 
-    FileStorageManager sm = (FileStorageManager) StorageManager.getFileStorageManager(conf);
+    FileStorageManager sm = (FileStorageManager) TableSpaceManager.getFileStorageManager(conf);
     Path tablePath = new Path(testDir, "testMaxValue.data");
     Appender appender = sm.getAppender(meta, schema, tablePath);
 
@@ -955,7 +955,7 @@ public class TestStorages {
 
 
     if (storeType.equalsIgnoreCase("RAW")){
-      StorageManager.clearCache();
+      TableSpaceManager.clearCache();
     }
   }
 
@@ -977,7 +977,7 @@ public class TestStorages {
     meta.setOptions(CatalogUtil.newPhysicalProperties(storeType));
 
     Path tablePath = new Path(testDir, "testLessThanSchemaSize.data");
-    FileStorageManager sm = (FileStorageManager) StorageManager.getFileStorageManager(conf);
+    FileStorageManager sm = (FileStorageManager) TableSpaceManager.getFileStorageManager(conf);
     Appender appender = sm.getAppender(meta, dataSchema, tablePath);
     appender.init();
 
@@ -1003,7 +1003,7 @@ public class TestStorages {
     inSchema.addColumn("col5", Type.INT8);
 
     FileFragment fragment = new FileFragment("table", tablePath, 0, status.getLen());
-    Scanner scanner = StorageManager.getFileStorageManager(conf).getScanner(meta, inSchema, fragment);
+    Scanner scanner = TableSpaceManager.getFileStorageManager(conf).getScanner(meta, inSchema, fragment);
 
     Schema target = new Schema();
 
@@ -1041,7 +1041,7 @@ public class TestStorages {
     meta.setOptions(CatalogUtil.newPhysicalProperties(storeType));
 
     Path tablePath = new Path(testDir, "test_storetype_oversize.data");
-    FileStorageManager sm = (FileStorageManager) StorageManager.getFileStorageManager(conf);
+    FileStorageManager sm = (FileStorageManager) TableSpaceManager.getFileStorageManager(conf);
     Appender appender = sm.getAppender(meta, dataSchema, tablePath);
     appender.init();
 

http://git-wip-us.apache.org/repos/asf/tajo/blob/5491f0e7/tajo-storage/tajo-storage-hdfs/src/test/java/org/apache/tajo/storage/index/TestBSTIndex.java
----------------------------------------------------------------------
diff --git a/tajo-storage/tajo-storage-hdfs/src/test/java/org/apache/tajo/storage/index/TestBSTIndex.java b/tajo-storage/tajo-storage-hdfs/src/test/java/org/apache/tajo/storage/index/TestBSTIndex.java
index 114a9cc..ae0fd58 100644
--- a/tajo-storage/tajo-storage-hdfs/src/test/java/org/apache/tajo/storage/index/TestBSTIndex.java
+++ b/tajo-storage/tajo-storage-hdfs/src/test/java/org/apache/tajo/storage/index/TestBSTIndex.java
@@ -22,7 +22,6 @@ import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.tajo.catalog.*;
-import org.apache.tajo.catalog.proto.CatalogProtos.StoreType;
 import org.apache.tajo.common.TajoDataTypes.Type;
 import org.apache.tajo.conf.TajoConf;
 import org.apache.tajo.datum.DatumFactory;
@@ -90,7 +89,7 @@ public class TestBSTIndex {
     meta = CatalogUtil.newTableMeta(storeType);
 
     Path tablePath = new Path(testDir, "testFindValue_" + storeType);
-    Appender appender = ((FileStorageManager)StorageManager.getFileStorageManager(conf)).getAppender(meta, schema, tablePath);
+    Appender appender = ((FileStorageManager) TableSpaceManager.getFileStorageManager(conf)).getAppender(meta, schema, tablePath);
     appender.init();
     Tuple tuple;
     for (int i = 0; i < TUPLE_NUM; i++) {
@@ -125,7 +124,7 @@ public class TestBSTIndex {
     creater.setLoadNum(LOAD_NUM);
     creater.open();
 
-    SeekableScanner scanner = FileStorageManager.getSeekableScanner(conf, meta, schema, tablet, schema);
+    SeekableScanner scanner = TableSpaceManager.getSeekableScanner(conf, meta, schema, tablet, schema);
     scanner.init();
 
     Tuple keyTuple;
@@ -148,7 +147,7 @@ public class TestBSTIndex {
     tuple = new VTuple(keySchema.size());
     BSTIndexReader reader = bst.getIndexReader(new Path(testDir, "testFindValue_" + storeType + ".idx"), keySchema, comp);
     reader.open();
-    scanner = FileStorageManager.getSeekableScanner(conf, meta, schema, tablet, schema);
+    scanner = TableSpaceManager.getSeekableScanner(conf, meta, schema, tablet, schema);
     scanner.init();
 
     for (int i = 0; i < TUPLE_NUM - 1; i++) {
@@ -178,7 +177,7 @@ public class TestBSTIndex {
     meta = CatalogUtil.newTableMeta(storeType);
 
     Path tablePath = new Path(testDir, "testBuildIndexWithAppender_" + storeType);
-    FileAppender appender = (FileAppender) ((FileStorageManager)StorageManager.getFileStorageManager(conf))
+    FileAppender appender = (FileAppender) ((FileStorageManager) TableSpaceManager.getFileStorageManager(conf))
         .getAppender(meta, schema, tablePath);
     appender.init();
 
@@ -227,7 +226,7 @@ public class TestBSTIndex {
     BSTIndexReader reader = bst.getIndexReader(new Path(testDir, "testBuildIndexWithAppender_" + storeType + ".idx"),
         keySchema, comp);
     reader.open();
-    SeekableScanner scanner = FileStorageManager.getSeekableScanner(conf, meta, schema, tablet, schema);
+    SeekableScanner scanner = TableSpaceManager.getSeekableScanner(conf, meta, schema, tablet, schema);
     scanner.init();
 
     for (int i = 0; i < TUPLE_NUM - 1; i++) {
@@ -257,7 +256,7 @@ public class TestBSTIndex {
     meta = CatalogUtil.newTableMeta(storeType);
 
     Path tablePath = StorageUtil.concatPath(testDir, "testFindOmittedValue_" + storeType);
-    Appender appender = ((FileStorageManager)StorageManager.getFileStorageManager(conf)).getAppender(meta, schema, tablePath);
+    Appender appender = ((FileStorageManager) TableSpaceManager.getFileStorageManager(conf)).getAppender(meta, schema, tablePath);
     appender.init();
     Tuple tuple;
     for (int i = 0; i < TUPLE_NUM; i += 2) {
@@ -290,7 +289,7 @@ public class TestBSTIndex {
     creater.setLoadNum(LOAD_NUM);
     creater.open();
 
-    SeekableScanner scanner = FileStorageManager.getSeekableScanner(conf, meta, schema, tablet, schema);
+    SeekableScanner scanner = TableSpaceManager.getSeekableScanner(conf, meta, schema, tablet, schema);
     scanner.init();
 
     Tuple keyTuple;
@@ -327,7 +326,7 @@ public class TestBSTIndex {
     meta = CatalogUtil.newTableMeta(storeType);
 
     Path tablePath = new Path(testDir, "testFindNextKeyValue_" + storeType);
-    Appender appender = ((FileStorageManager)StorageManager.getFileStorageManager(conf)).getAppender(meta, schema, tablePath);
+    Appender appender = ((FileStorageManager) TableSpaceManager.getFileStorageManager(conf)).getAppender(meta, schema, tablePath);
     appender.init();
     Tuple tuple;
     for (int i = 0; i < TUPLE_NUM; i++) {
@@ -361,7 +360,7 @@ public class TestBSTIndex {
     creater.setLoadNum(LOAD_NUM);
     creater.open();
 
-    SeekableScanner scanner = FileStorageManager.getSeekableScanner(conf, meta, schema, tablet, schema);
+    SeekableScanner scanner = TableSpaceManager.getSeekableScanner(conf, meta, schema, tablet, schema);
     scanner.init();
 
     Tuple keyTuple;
@@ -384,7 +383,7 @@ public class TestBSTIndex {
     BSTIndexReader reader = bst.getIndexReader(new Path(testDir, "testFindNextKeyValue_" + storeType + ".idx"),
         keySchema, comp);
     reader.open();
-    scanner = FileStorageManager.getSeekableScanner(conf, meta, schema, tablet, schema);
+    scanner = TableSpaceManager.getSeekableScanner(conf, meta, schema, tablet, schema);
     scanner.init();
 
     Tuple result;
@@ -417,7 +416,7 @@ public class TestBSTIndex {
     meta = CatalogUtil.newTableMeta(storeType);
 
     Path tablePath = new Path(testDir, "testFindNextKeyOmittedValue_" + storeType);
-    Appender appender = ((FileStorageManager)StorageManager.getFileStorageManager(conf))
+    Appender appender = ((FileStorageManager) TableSpaceManager.getFileStorageManager(conf))
         .getAppender(meta, schema, tablePath);
     appender.init();
     Tuple tuple;
@@ -452,7 +451,7 @@ public class TestBSTIndex {
     creater.setLoadNum(LOAD_NUM);
     creater.open();
 
-    SeekableScanner scanner = FileStorageManager.getSeekableScanner(conf, meta, schema, tablet, schema);
+    SeekableScanner scanner = TableSpaceManager.getSeekableScanner(conf, meta, schema, tablet, schema);
     scanner.init();
 
     Tuple keyTuple;
@@ -475,7 +474,7 @@ public class TestBSTIndex {
     BSTIndexReader reader = bst.getIndexReader(new Path(testDir, "testFindNextKeyOmittedValue_" + storeType + ".idx"),
         keySchema, comp);
     reader.open();
-    scanner = FileStorageManager.getSeekableScanner(conf, meta, schema, tablet, schema);
+    scanner = TableSpaceManager.getSeekableScanner(conf, meta, schema, tablet, schema);
     scanner.init();
 
     Tuple result;
@@ -497,7 +496,7 @@ public class TestBSTIndex {
     meta = CatalogUtil.newTableMeta(storeType);
 
     Path tablePath = new Path(testDir, "testFindMinValue" + storeType);
-    Appender appender = ((FileStorageManager)StorageManager.getFileStorageManager(conf)).getAppender(meta, schema, tablePath);
+    Appender appender = ((FileStorageManager) TableSpaceManager.getFileStorageManager(conf)).getAppender(meta, schema, tablePath);
     appender.init();
 
     Tuple tuple;
@@ -531,7 +530,7 @@ public class TestBSTIndex {
     creater.setLoadNum(LOAD_NUM);
     creater.open();
 
-    SeekableScanner scanner = FileStorageManager.getSeekableScanner(conf, meta, schema, tablet, schema);
+    SeekableScanner scanner = TableSpaceManager.getSeekableScanner(conf, meta, schema, tablet, schema);
     scanner.init();
 
     Tuple keyTuple;
@@ -556,7 +555,7 @@ public class TestBSTIndex {
     BSTIndexReader reader = bst.getIndexReader(new Path(testDir, "testFindMinValue_" + storeType + ".idx"),
         keySchema, comp);
     reader.open();
-    scanner = FileStorageManager.getSeekableScanner(conf, meta, schema, tablet, schema);
+    scanner = TableSpaceManager.getSeekableScanner(conf, meta, schema, tablet, schema);
     scanner.init();
 
     tuple.put(0, DatumFactory.createInt8(0));
@@ -580,7 +579,7 @@ public class TestBSTIndex {
     meta = CatalogUtil.newTableMeta(storeType);
 
     Path tablePath = new Path(testDir, "testMinMax_" + storeType);
-    Appender appender = ((FileStorageManager)StorageManager.getFileStorageManager(conf)).getAppender(meta, schema, tablePath);
+    Appender appender = ((FileStorageManager) TableSpaceManager.getFileStorageManager(conf)).getAppender(meta, schema, tablePath);
     appender.init();
     Tuple tuple;
     for (int i = 5; i < TUPLE_NUM; i += 2) {
@@ -614,7 +613,7 @@ public class TestBSTIndex {
     creater.setLoadNum(LOAD_NUM);
     creater.open();
 
-    SeekableScanner scanner = FileStorageManager.getSeekableScanner(conf, meta, schema, tablet, schema);
+    SeekableScanner scanner = TableSpaceManager.getSeekableScanner(conf, meta, schema, tablet, schema);
     scanner.init();
 
     Tuple keyTuple;
@@ -684,7 +683,7 @@ public class TestBSTIndex {
     meta = CatalogUtil.newTableMeta(storeType);
 
     Path tablePath = new Path(testDir, "testConcurrentAccess_" + storeType);
-    Appender appender = ((FileStorageManager)StorageManager.getFileStorageManager(conf)).getAppender(meta, schema, tablePath);
+    Appender appender = ((FileStorageManager) TableSpaceManager.getFileStorageManager(conf)).getAppender(meta, schema, tablePath);
     appender.init();
 
     Tuple tuple;
@@ -719,7 +718,7 @@ public class TestBSTIndex {
     creater.setLoadNum(LOAD_NUM);
     creater.open();
 
-    SeekableScanner scanner = FileStorageManager.getSeekableScanner(conf, meta, schema, tablet, schema);
+    SeekableScanner scanner = TableSpaceManager.getSeekableScanner(conf, meta, schema, tablet, schema);
     scanner.init();
 
     Tuple keyTuple;
@@ -764,7 +763,7 @@ public class TestBSTIndex {
     meta = CatalogUtil.newTableMeta(storeType);
 
     Path tablePath = new Path(testDir, "testFindValueDescOrder_" + storeType);
-    Appender appender = ((FileStorageManager)StorageManager.getFileStorageManager(conf)).getAppender(meta, schema, tablePath);
+    Appender appender = ((FileStorageManager) TableSpaceManager.getFileStorageManager(conf)).getAppender(meta, schema, tablePath);
     appender.init();
 
     Tuple tuple;
@@ -800,7 +799,7 @@ public class TestBSTIndex {
     creater.setLoadNum(LOAD_NUM);
     creater.open();
 
-    SeekableScanner scanner = FileStorageManager.getSeekableScanner(conf, meta, schema, tablet, schema);
+    SeekableScanner scanner = TableSpaceManager.getSeekableScanner(conf, meta, schema, tablet, schema);
     scanner.init();
 
     Tuple keyTuple;
@@ -825,7 +824,7 @@ public class TestBSTIndex {
     BSTIndexReader reader = bst.getIndexReader(new Path(testDir, "testFindValueDescOrder_" + storeType + ".idx"),
         keySchema, comp);
     reader.open();
-    scanner = FileStorageManager.getSeekableScanner(conf, meta, schema, tablet, schema);
+    scanner = TableSpaceManager.getSeekableScanner(conf, meta, schema, tablet, schema);
     scanner.init();
 
     for (int i = (TUPLE_NUM - 1); i > 0; i--) {
@@ -855,7 +854,7 @@ public class TestBSTIndex {
     meta = CatalogUtil.newTableMeta(storeType);
 
     Path tablePath = new Path(testDir, "testFindNextKeyValueDescOrder_" + storeType);
-    Appender appender = ((FileStorageManager)StorageManager.getFileStorageManager(conf)).getAppender(meta, schema, tablePath);
+    Appender appender = ((FileStorageManager) TableSpaceManager.getFileStorageManager(conf)).getAppender(meta, schema, tablePath);
     appender.init();
 
     Tuple tuple;
@@ -890,7 +889,7 @@ public class TestBSTIndex {
     creater.setLoadNum(LOAD_NUM);
     creater.open();
 
-    SeekableScanner scanner = StorageManager.getSeekableScanner(conf, meta, schema, tablet, schema);
+    SeekableScanner scanner = TableSpaceManager.getSeekableScanner(conf, meta, schema, tablet, schema);
     scanner.init();
 
     Tuple keyTuple;
@@ -918,7 +917,7 @@ public class TestBSTIndex {
     assertEquals(keySchema, reader.getKeySchema());
     assertEquals(comp, reader.getComparator());
 
-    scanner = StorageManager.getSeekableScanner(conf, meta, schema, tablet, schema);
+    scanner = TableSpaceManager.getSeekableScanner(conf, meta, schema, tablet, schema);
     scanner.init();
 
     Tuple result;

http://git-wip-us.apache.org/repos/asf/tajo/blob/5491f0e7/tajo-storage/tajo-storage-hdfs/src/test/java/org/apache/tajo/storage/index/TestSingleCSVFileBSTIndex.java
----------------------------------------------------------------------
diff --git a/tajo-storage/tajo-storage-hdfs/src/test/java/org/apache/tajo/storage/index/TestSingleCSVFileBSTIndex.java b/tajo-storage/tajo-storage-hdfs/src/test/java/org/apache/tajo/storage/index/TestSingleCSVFileBSTIndex.java
index cb94353..cebeeb2 100644
--- a/tajo-storage/tajo-storage-hdfs/src/test/java/org/apache/tajo/storage/index/TestSingleCSVFileBSTIndex.java
+++ b/tajo-storage/tajo-storage-hdfs/src/test/java/org/apache/tajo/storage/index/TestSingleCSVFileBSTIndex.java
@@ -22,7 +22,6 @@ import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.tajo.catalog.*;
-import org.apache.tajo.catalog.proto.CatalogProtos.StoreType;
 import org.apache.tajo.common.TajoDataTypes.Type;
 import org.apache.tajo.conf.TajoConf;
 import org.apache.tajo.conf.TajoConf.ConfVars;
@@ -78,7 +77,7 @@ public class TestSingleCSVFileBSTIndex {
     Path tablePath = StorageUtil.concatPath(testDir, "testFindValueInSingleCSV", "table.csv");
     fs.mkdirs(tablePath.getParent());
 
-    Appender appender = ((FileStorageManager)StorageManager.getFileStorageManager(conf)).getAppender(meta, schema, tablePath);
+    Appender appender = ((FileStorageManager) TableSpaceManager.getFileStorageManager(conf)).getAppender(meta, schema, tablePath);
     appender.init();
     Tuple tuple;
     for (int i = 0; i < TUPLE_NUM; i++) {
@@ -167,7 +166,7 @@ public class TestSingleCSVFileBSTIndex {
     Path tablePath = StorageUtil.concatPath(testDir, "testFindNextKeyValueInSingleCSV",
         "table1.csv");
     fs.mkdirs(tablePath.getParent());
-    Appender appender = ((FileStorageManager)StorageManager.getFileStorageManager(conf)).getAppender(meta, schema, tablePath);
+    Appender appender = ((FileStorageManager) TableSpaceManager.getFileStorageManager(conf)).getAppender(meta, schema, tablePath);
     appender.init();
     Tuple tuple;
     for(int i = 0 ; i < TUPLE_NUM; i ++ ) {

http://git-wip-us.apache.org/repos/asf/tajo/blob/5491f0e7/tajo-storage/tajo-storage-hdfs/src/test/java/org/apache/tajo/storage/json/TestJsonSerDe.java
----------------------------------------------------------------------
diff --git a/tajo-storage/tajo-storage-hdfs/src/test/java/org/apache/tajo/storage/json/TestJsonSerDe.java b/tajo-storage/tajo-storage-hdfs/src/test/java/org/apache/tajo/storage/json/TestJsonSerDe.java
index 349da07..48e2db9 100644
--- a/tajo-storage/tajo-storage-hdfs/src/test/java/org/apache/tajo/storage/json/TestJsonSerDe.java
+++ b/tajo-storage/tajo-storage-hdfs/src/test/java/org/apache/tajo/storage/json/TestJsonSerDe.java
@@ -24,16 +24,12 @@ import org.apache.hadoop.fs.Path;
 import org.apache.tajo.catalog.CatalogUtil;
 import org.apache.tajo.catalog.Schema;
 import org.apache.tajo.catalog.TableMeta;
-import org.apache.tajo.catalog.proto.CatalogProtos;
 import org.apache.tajo.common.TajoDataTypes;
 import org.apache.tajo.conf.TajoConf;
 import org.apache.tajo.datum.Datum;
 import org.apache.tajo.datum.DatumFactory;
 import org.apache.tajo.datum.NullDatum;
-import org.apache.tajo.storage.Scanner;
-import org.apache.tajo.storage.StorageManager;
-import org.apache.tajo.storage.Tuple;
-import org.apache.tajo.storage.VTuple;
+import org.apache.tajo.storage.*;
 import org.apache.tajo.storage.fragment.FileFragment;
 import org.junit.Test;
 
@@ -73,7 +69,7 @@ public class TestJsonSerDe {
     FileSystem fs = FileSystem.getLocal(conf);
     FileStatus status = fs.getFileStatus(tablePath);
     FileFragment fragment = new FileFragment("table", tablePath, 0, status.getLen());
-    Scanner scanner =  StorageManager.getFileStorageManager(conf).getScanner(meta, schema, fragment);
+    Scanner scanner =  TableSpaceManager.getFileStorageManager(conf).getScanner(meta, schema, fragment);
     scanner.init();
 
     Tuple tuple = scanner.next();


[04/10] tajo git commit: TAJO-1605: Fix master build failure on jdk 1.6. (jinho)

Posted by ji...@apache.org.
TAJO-1605: Fix master build failure on jdk 1.6. (jinho)

Closes #569


Project: http://git-wip-us.apache.org/repos/asf/tajo/repo
Commit: http://git-wip-us.apache.org/repos/asf/tajo/commit/f3acbdf5
Tree: http://git-wip-us.apache.org/repos/asf/tajo/tree/f3acbdf5
Diff: http://git-wip-us.apache.org/repos/asf/tajo/diff/f3acbdf5

Branch: refs/heads/index_support
Commit: f3acbdf5c69e7cf5a892ea2c07b9548c648cd64f
Parents: d3ca4bc
Author: Jinho Kim <jh...@apache.org>
Authored: Fri May 15 11:47:59 2015 +0900
Committer: Jinho Kim <jh...@apache.org>
Committed: Fri May 15 11:47:59 2015 +0900

----------------------------------------------------------------------
 CHANGES                                                  |  2 ++
 .../java/org/apache/tajo/service/HAServiceTracker.java   | 11 ++++++++---
 .../org/apache/tajo/worker/WorkerHeartbeatService.java   |  2 +-
 3 files changed, 11 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/tajo/blob/f3acbdf5/CHANGES
----------------------------------------------------------------------
diff --git a/CHANGES b/CHANGES
index da16ffe..77be589 100644
--- a/CHANGES
+++ b/CHANGES
@@ -123,6 +123,8 @@ Release 0.11.0 - unreleased
 
   BUG FIXES
 
+    TAJO-1605: Fix master build failure on jdk 1.6. (jinho)
+
     TAJO-1485: Datum 'Char' returned only 1byte.
     (Contributed by DaeMyung Kang, Committed by jihoon)
 

http://git-wip-us.apache.org/repos/asf/tajo/blob/f3acbdf5/tajo-common/src/main/java/org/apache/tajo/service/HAServiceTracker.java
----------------------------------------------------------------------
diff --git a/tajo-common/src/main/java/org/apache/tajo/service/HAServiceTracker.java b/tajo-common/src/main/java/org/apache/tajo/service/HAServiceTracker.java
index 081b153..8c553e9 100644
--- a/tajo-common/src/main/java/org/apache/tajo/service/HAServiceTracker.java
+++ b/tajo-common/src/main/java/org/apache/tajo/service/HAServiceTracker.java
@@ -21,10 +21,9 @@ package org.apache.tajo.service;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.net.NetUtils;
-import org.apache.tajo.conf.TajoConf;
-import org.apache.tajo.util.FileUtil;
 
 import javax.net.SocketFactory;
+import java.io.IOException;
 import java.net.InetSocketAddress;
 import java.net.Socket;
 
@@ -59,7 +58,13 @@ public abstract class HAServiceTracker implements ServiceTracker {
     } catch (Exception e) {
       isAlive = false;
     } finally {
-      FileUtil.cleanup(LOG, socket);
+      if (socket != null) {
+        try {
+          socket.close();
+        } catch (IOException e) {
+          LOG.debug(e.getMessage(), e);
+        }
+      }
     }
     return isAlive;
   }

http://git-wip-us.apache.org/repos/asf/tajo/blob/f3acbdf5/tajo-core/src/main/java/org/apache/tajo/worker/WorkerHeartbeatService.java
----------------------------------------------------------------------
diff --git a/tajo-core/src/main/java/org/apache/tajo/worker/WorkerHeartbeatService.java b/tajo-core/src/main/java/org/apache/tajo/worker/WorkerHeartbeatService.java
index 9afee5a..bd70d59 100644
--- a/tajo-core/src/main/java/org/apache/tajo/worker/WorkerHeartbeatService.java
+++ b/tajo-core/src/main/java/org/apache/tajo/worker/WorkerHeartbeatService.java
@@ -74,12 +74,12 @@ public class WorkerHeartbeatService extends AbstractService {
     this.systemConf = (TajoConf) conf;
 
     this.connectionManager = RpcClientManager.getInstance();
+    thread = new WorkerHeartbeatThread();
     super.serviceInit(conf);
   }
 
   @Override
   public void serviceStart() throws Exception {
-    thread = new WorkerHeartbeatThread();
     thread.start();
     super.serviceStart();
   }


[03/10] tajo git commit: TAJO-1485: Datum 'Char' returned only 1byte.

Posted by ji...@apache.org.
TAJO-1485: Datum 'Char' returned only 1byte.

Signed-off-by: Jihoon Son <ji...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/tajo/repo
Commit: http://git-wip-us.apache.org/repos/asf/tajo/commit/d3ca4bc1
Tree: http://git-wip-us.apache.org/repos/asf/tajo/tree/d3ca4bc1
Diff: http://git-wip-us.apache.org/repos/asf/tajo/diff/d3ca4bc1

Branch: refs/heads/index_support
Commit: d3ca4bc1e59539896ddcb3b1075433a79f713114
Parents: 31c4630
Author: DaeMyung Kang <ch...@naver.com>
Authored: Thu May 14 16:39:13 2015 +0900
Committer: Jihoon Son <ji...@apache.org>
Committed: Thu May 14 16:39:13 2015 +0900

----------------------------------------------------------------------
 CHANGES                                         |  3 ++
 .../org/apache/tajo/storage/RowStoreUtil.java   | 24 ++++++++--
 .../ValueTooLongForTypeCharactersException.java | 27 +++++++++++
 .../tajo/engine/query/TestInsertQuery.java      | 45 +++++++++++++++++-
 .../apache/tajo/engine/util/TestTupleUtil.java  | 18 ++++++++
 .../queries/TestInsertQuery/test1_ddl.sql       |  1 +
 .../TestInsertQuery/test1_nolength_ddl.sql      |  1 +
 .../testInsertIntoSelectWithFixedSizeChar.sql   |  4 ++
 ...tIntoSelectWithFixedSizeCharWithNoLength.sql |  2 +
 .../org/apache/tajo/plan/LogicalPlanner.java    |  4 ++
 .../stream/TextFieldSerializerDeserializer.java |  8 +++-
 .../storage/BinarySerializerDeserializer.java   | 10 ++++
 .../org/apache/tajo/storage/RowStoreUtil.java   | 20 ++++++--
 .../storage/TextSerializerDeserializer.java     | 10 ++--
 .../tajo/storage/parquet/TajoWriteSupport.java  |  7 +++
 .../text/TextFieldSerializerDeserializer.java   |  8 +++-
 .../org/apache/tajo/storage/TestStorages.java   | 48 +++++++++++++++++++-
 17 files changed, 225 insertions(+), 15 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/tajo/blob/d3ca4bc1/CHANGES
----------------------------------------------------------------------
diff --git a/CHANGES b/CHANGES
index c9b2522..da16ffe 100644
--- a/CHANGES
+++ b/CHANGES
@@ -123,6 +123,9 @@ Release 0.11.0 - unreleased
 
   BUG FIXES
 
+    TAJO-1485: Datum 'Char' returned only 1byte.
+    (Contributed by DaeMyung Kang, Committed by jihoon)
+
     TAJO-1586: TajoMaster HA startup failure on Yarn. (jaehwa)
 
     TAJO-1598: TableMeta should change equals mechanism.

http://git-wip-us.apache.org/repos/asf/tajo/blob/d3ca4bc1/tajo-client/src/main/java/org/apache/tajo/storage/RowStoreUtil.java
----------------------------------------------------------------------
diff --git a/tajo-client/src/main/java/org/apache/tajo/storage/RowStoreUtil.java b/tajo-client/src/main/java/org/apache/tajo/storage/RowStoreUtil.java
index 6e16095..95dd60e 100644
--- a/tajo-client/src/main/java/org/apache/tajo/storage/RowStoreUtil.java
+++ b/tajo-client/src/main/java/org/apache/tajo/storage/RowStoreUtil.java
@@ -25,6 +25,7 @@ import org.apache.tajo.datum.DatumFactory;
 import org.apache.tajo.datum.IntervalDatum;
 import org.apache.tajo.exception.UnknownDataTypeException;
 import org.apache.tajo.exception.UnsupportedException;
+import org.apache.tajo.exception.ValueTooLongForTypeCharactersException;
 import org.apache.tajo.util.BitArray;
 
 import java.nio.ByteBuffer;
@@ -100,8 +101,9 @@ public class RowStoreUtil {
             break;
 
           case CHAR:
-            byte c = bb.get();
-            tuple.put(i, DatumFactory.createChar(c));
+            byte [] _str = new byte[type.getLength()];
+            bb.get(_str);
+            tuple.put(i, DatumFactory.createChar(_str));
             break;
 
           case INT2:
@@ -197,7 +199,17 @@ public class RowStoreUtil {
           case NULL_TYPE: nullFlags.set(i); break;
           case BOOLEAN: bb.put(tuple.get(i).asByte()); break;
           case BIT: bb.put(tuple.get(i).asByte()); break;
-          case CHAR: bb.put(tuple.get(i).asByte()); break;
+          case CHAR:
+            int charSize = col.getDataType().getLength();
+            byte [] _char = new byte[charSize];
+            byte [] src = tuple.get(i).asByteArray();
+            if (charSize < src.length) {
+              throw new ValueTooLongForTypeCharactersException(charSize);
+            }
+
+            System.arraycopy(src, 0, _char, 0, src.length);
+            bb.put(_char);
+            break;
           case INT2: bb.putShort(tuple.get(i).asInt2()); break;
           case INT4: bb.putInt(tuple.get(i).asInt4()); break;
           case INT8: bb.putLong(tuple.get(i).asInt8()); break;
@@ -259,7 +271,11 @@ public class RowStoreUtil {
         switch (col.getDataType().getType()) {
           case BOOLEAN:
           case BIT:
-          case CHAR: size += 1; break;
+            size += 1;
+            break;
+          case CHAR:
+            size += col.getDataType().getLength();
+            break;
           case INT2: size += 2; break;
           case DATE:
           case INT4:

http://git-wip-us.apache.org/repos/asf/tajo/blob/d3ca4bc1/tajo-common/src/main/java/org/apache/tajo/exception/ValueTooLongForTypeCharactersException.java
----------------------------------------------------------------------
diff --git a/tajo-common/src/main/java/org/apache/tajo/exception/ValueTooLongForTypeCharactersException.java b/tajo-common/src/main/java/org/apache/tajo/exception/ValueTooLongForTypeCharactersException.java
new file mode 100644
index 0000000..262b714
--- /dev/null
+++ b/tajo-common/src/main/java/org/apache/tajo/exception/ValueTooLongForTypeCharactersException.java
@@ -0,0 +1,27 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.tajo.exception;
+
+public class ValueTooLongForTypeCharactersException extends RuntimeException {
+  private static final long serialVersionUID = -7689027447969916150L;
+
+  public ValueTooLongForTypeCharactersException(int size) {
+    super("value too long for type character(" + size + ")");
+  }
+}

http://git-wip-us.apache.org/repos/asf/tajo/blob/d3ca4bc1/tajo-core/src/test/java/org/apache/tajo/engine/query/TestInsertQuery.java
----------------------------------------------------------------------
diff --git a/tajo-core/src/test/java/org/apache/tajo/engine/query/TestInsertQuery.java b/tajo-core/src/test/java/org/apache/tajo/engine/query/TestInsertQuery.java
index b3e3402..b4334f6 100644
--- a/tajo-core/src/test/java/org/apache/tajo/engine/query/TestInsertQuery.java
+++ b/tajo-core/src/test/java/org/apache/tajo/engine/query/TestInsertQuery.java
@@ -39,7 +39,6 @@ import java.sql.ResultSet;
 import java.util.List;
 
 import static org.junit.Assert.*;
-import static org.junit.Assert.assertEquals;
 
 @Category(IntegrationTest.class)
 public class TestInsertQuery extends QueryTestCaseBase {
@@ -836,4 +835,48 @@ public class TestInsertQuery extends QueryTestCaseBase {
       executeString("drop table nation_diff purge;");
     }
   }
+
+  @Test
+  public final void testFixedCharSelectWithNoLength() throws Exception {
+    ResultSet res = executeFile("test1_nolength_ddl.sql");
+    res.close();
+
+    CatalogService catalog = testingCluster.getMaster().getCatalog();
+    assertTrue(catalog.existsTable(getCurrentDatabase(), "test1"));
+
+    res = executeFile("testInsertIntoSelectWithFixedSizeCharWithNoLength.sql");
+    res.close();
+
+    //remove \0
+    String resultDatas = getTableFileContents("test1").replaceAll("\0","");
+    String expected = "a\n";
+
+    assertNotNull(resultDatas);
+    assertEquals(expected.length(), resultDatas.length());
+    assertEquals(expected, resultDatas);
+    executeString("DROP TABLE test1 PURGE");
+  }
+
+  @Test
+  public final void testFixedCharSelect() throws Exception {
+    ResultSet res = executeFile("test1_ddl.sql");
+    res.close();
+
+    CatalogService catalog = testingCluster.getMaster().getCatalog();
+    assertTrue(catalog.existsTable(getCurrentDatabase(), "test1"));
+
+    res = executeFile("testInsertIntoSelectWithFixedSizeChar.sql");
+    res.close();
+
+    //remove \0
+    String resultDatas = getTableFileContents("test1").replaceAll("\0","");
+    String expected = "a\n" +
+      "abc\n" +
+      "abcde\n";
+
+    assertNotNull(resultDatas);
+    assertEquals(expected.length(), resultDatas.length());
+    assertEquals(expected, resultDatas);
+    executeString("DROP TABLE test1 PURGE");
+  }
 }

http://git-wip-us.apache.org/repos/asf/tajo/blob/d3ca4bc1/tajo-core/src/test/java/org/apache/tajo/engine/util/TestTupleUtil.java
----------------------------------------------------------------------
diff --git a/tajo-core/src/test/java/org/apache/tajo/engine/util/TestTupleUtil.java b/tajo-core/src/test/java/org/apache/tajo/engine/util/TestTupleUtil.java
index b8114e0..c1c07b8 100644
--- a/tajo-core/src/test/java/org/apache/tajo/engine/util/TestTupleUtil.java
+++ b/tajo-core/src/test/java/org/apache/tajo/engine/util/TestTupleUtil.java
@@ -37,6 +37,24 @@ import static org.junit.Assert.*;
 
 public class TestTupleUtil {
   @Test
+  public final void testFixedSizeChar() {
+    Schema schema = new Schema();
+    schema.addColumn("col1", Type.CHAR, 5);
+
+    Tuple tuple = new VTuple(1);
+    tuple.put(new Datum[] {
+      DatumFactory.createChar("abc\0\0")
+    });
+
+    RowStoreEncoder encoder = RowStoreUtil.createEncoder(schema);
+    RowStoreDecoder decoder = RowStoreUtil.createDecoder(schema);
+    byte [] bytes = encoder.toBytes(tuple);
+    Tuple tuple2 = decoder.toTuple(bytes);
+
+    assertEquals(tuple, tuple2);
+  }
+
+  @Test
   public final void testToBytesAndToTuple() {
     Schema schema = new Schema();
     schema.addColumn("col1", Type.BOOLEAN);

http://git-wip-us.apache.org/repos/asf/tajo/blob/d3ca4bc1/tajo-core/src/test/resources/queries/TestInsertQuery/test1_ddl.sql
----------------------------------------------------------------------
diff --git a/tajo-core/src/test/resources/queries/TestInsertQuery/test1_ddl.sql b/tajo-core/src/test/resources/queries/TestInsertQuery/test1_ddl.sql
new file mode 100644
index 0000000..c02b080
--- /dev/null
+++ b/tajo-core/src/test/resources/queries/TestInsertQuery/test1_ddl.sql
@@ -0,0 +1 @@
+create table test1 (col1 char(5));
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/tajo/blob/d3ca4bc1/tajo-core/src/test/resources/queries/TestInsertQuery/test1_nolength_ddl.sql
----------------------------------------------------------------------
diff --git a/tajo-core/src/test/resources/queries/TestInsertQuery/test1_nolength_ddl.sql b/tajo-core/src/test/resources/queries/TestInsertQuery/test1_nolength_ddl.sql
new file mode 100644
index 0000000..cbe3654
--- /dev/null
+++ b/tajo-core/src/test/resources/queries/TestInsertQuery/test1_nolength_ddl.sql
@@ -0,0 +1 @@
+create table test1 (col1 char);
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/tajo/blob/d3ca4bc1/tajo-core/src/test/resources/queries/TestInsertQuery/testInsertIntoSelectWithFixedSizeChar.sql
----------------------------------------------------------------------
diff --git a/tajo-core/src/test/resources/queries/TestInsertQuery/testInsertIntoSelectWithFixedSizeChar.sql b/tajo-core/src/test/resources/queries/TestInsertQuery/testInsertIntoSelectWithFixedSizeChar.sql
new file mode 100644
index 0000000..f7ec11c
--- /dev/null
+++ b/tajo-core/src/test/resources/queries/TestInsertQuery/testInsertIntoSelectWithFixedSizeChar.sql
@@ -0,0 +1,4 @@
+insert into test1 select 'a';
+insert into test1 select 'abc';
+insert into test1 select 'abcde';
+select * from test1;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/tajo/blob/d3ca4bc1/tajo-core/src/test/resources/queries/TestInsertQuery/testInsertIntoSelectWithFixedSizeCharWithNoLength.sql
----------------------------------------------------------------------
diff --git a/tajo-core/src/test/resources/queries/TestInsertQuery/testInsertIntoSelectWithFixedSizeCharWithNoLength.sql b/tajo-core/src/test/resources/queries/TestInsertQuery/testInsertIntoSelectWithFixedSizeCharWithNoLength.sql
new file mode 100644
index 0000000..02a1d6c
--- /dev/null
+++ b/tajo-core/src/test/resources/queries/TestInsertQuery/testInsertIntoSelectWithFixedSizeCharWithNoLength.sql
@@ -0,0 +1,2 @@
+insert into test1 select 'a';
+select * from test1;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/tajo/blob/d3ca4bc1/tajo-plan/src/main/java/org/apache/tajo/plan/LogicalPlanner.java
----------------------------------------------------------------------
diff --git a/tajo-plan/src/main/java/org/apache/tajo/plan/LogicalPlanner.java b/tajo-plan/src/main/java/org/apache/tajo/plan/LogicalPlanner.java
index ce1e4ec..cec0760 100644
--- a/tajo-plan/src/main/java/org/apache/tajo/plan/LogicalPlanner.java
+++ b/tajo-plan/src/main/java/org/apache/tajo/plan/LogicalPlanner.java
@@ -1913,6 +1913,10 @@ public class LogicalPlanner extends BaseAlgebraVisitor<LogicalPlanner.PlanContex
 
     if (dataType.hasLengthOrPrecision()) {
       builder.setLength(dataType.getLengthOrPrecision());
+    } else {
+      if (type == TajoDataTypes.Type.CHAR) {
+        builder.setLength(1);
+      }
     }
 
     TypeDesc typeDesc;

http://git-wip-us.apache.org/repos/asf/tajo/blob/d3ca4bc1/tajo-plan/src/main/java/org/apache/tajo/plan/function/stream/TextFieldSerializerDeserializer.java
----------------------------------------------------------------------
diff --git a/tajo-plan/src/main/java/org/apache/tajo/plan/function/stream/TextFieldSerializerDeserializer.java b/tajo-plan/src/main/java/org/apache/tajo/plan/function/stream/TextFieldSerializerDeserializer.java
index b6d5020..879373b 100644
--- a/tajo-plan/src/main/java/org/apache/tajo/plan/function/stream/TextFieldSerializerDeserializer.java
+++ b/tajo-plan/src/main/java/org/apache/tajo/plan/function/stream/TextFieldSerializerDeserializer.java
@@ -29,6 +29,7 @@ import org.apache.tajo.common.TajoDataTypes;
 import org.apache.tajo.datum.*;
 import org.apache.tajo.datum.protobuf.ProtobufJsonFormat;
 import org.apache.tajo.exception.UnsupportedException;
+import org.apache.tajo.exception.ValueTooLongForTypeCharactersException;
 import org.apache.tajo.storage.StorageConstants;
 import org.apache.tajo.util.NumberUtil;
 
@@ -84,7 +85,12 @@ public class TextFieldSerializerDeserializer implements FieldSerializerDeseriali
         length = trueBytes.length;
         break;
       case CHAR:
-        byte[] pad = new byte[dataType.getLength() - datum.size()];
+        int size = dataType.getLength() - datum.size();
+        if (size < 0){
+          throw new ValueTooLongForTypeCharactersException(dataType.getLength());
+        }
+
+        byte[] pad = new byte[size];
         bytes = datum.asTextBytes();
         out.write(bytes);
         out.write(pad);

http://git-wip-us.apache.org/repos/asf/tajo/blob/d3ca4bc1/tajo-storage/tajo-storage-common/src/main/java/org/apache/tajo/storage/BinarySerializerDeserializer.java
----------------------------------------------------------------------
diff --git a/tajo-storage/tajo-storage-common/src/main/java/org/apache/tajo/storage/BinarySerializerDeserializer.java b/tajo-storage/tajo-storage-common/src/main/java/org/apache/tajo/storage/BinarySerializerDeserializer.java
index 00112e7..a3b8da8 100644
--- a/tajo-storage/tajo-storage-common/src/main/java/org/apache/tajo/storage/BinarySerializerDeserializer.java
+++ b/tajo-storage/tajo-storage-common/src/main/java/org/apache/tajo/storage/BinarySerializerDeserializer.java
@@ -22,6 +22,7 @@ import com.google.common.base.Preconditions;
 import com.google.protobuf.Message;
 import org.apache.tajo.catalog.Column;
 import org.apache.tajo.datum.*;
+import org.apache.tajo.exception.ValueTooLongForTypeCharactersException;
 import org.apache.tajo.util.Bytes;
 
 import java.io.IOException;
@@ -44,9 +45,18 @@ public class BinarySerializerDeserializer implements SerializerDeserializer {
     switch (col.getDataType().getType()) {
       case BOOLEAN:
       case BIT:
+        bytes = datum.asByteArray();
+        length = bytes.length;
+        out.write(bytes, 0, length);
+				break;
+
       case CHAR:
         bytes = datum.asByteArray();
         length = bytes.length;
+        if (length > col.getDataType().getLength()) {
+          throw new ValueTooLongForTypeCharactersException(col.getDataType().getLength());
+        }
+
         out.write(bytes, 0, length);
         break;
       case INT2:

http://git-wip-us.apache.org/repos/asf/tajo/blob/d3ca4bc1/tajo-storage/tajo-storage-common/src/main/java/org/apache/tajo/storage/RowStoreUtil.java
----------------------------------------------------------------------
diff --git a/tajo-storage/tajo-storage-common/src/main/java/org/apache/tajo/storage/RowStoreUtil.java b/tajo-storage/tajo-storage-common/src/main/java/org/apache/tajo/storage/RowStoreUtil.java
index 9d69423..256bc78 100644
--- a/tajo-storage/tajo-storage-common/src/main/java/org/apache/tajo/storage/RowStoreUtil.java
+++ b/tajo-storage/tajo-storage-common/src/main/java/org/apache/tajo/storage/RowStoreUtil.java
@@ -26,6 +26,7 @@ import org.apache.tajo.datum.IntervalDatum;
 import org.apache.tajo.datum.ProtobufDatum;
 import org.apache.tajo.exception.UnknownDataTypeException;
 import org.apache.tajo.exception.UnsupportedException;
+import org.apache.tajo.exception.ValueTooLongForTypeCharactersException;
 import org.apache.tajo.tuple.offheap.RowWriter;
 import org.apache.tajo.util.BitArray;
 
@@ -99,8 +100,9 @@ public class RowStoreUtil {
             break;
 
           case CHAR:
-            byte c = bb.get();
-            tuple.put(i, DatumFactory.createChar(c));
+            byte [] _str = new byte[type.getLength()];
+            bb.get(_str);
+            tuple.put(i, DatumFactory.createChar(_str));
             break;
 
           case INT2:
@@ -204,7 +206,15 @@ public class RowStoreUtil {
           bb.put(tuple.get(i).asByte());
           break;
         case CHAR:
-          bb.put(tuple.get(i).asByte());
+          int charSize = col.getDataType().getLength();
+          byte [] _char = new byte[charSize];
+          byte [] src = tuple.get(i).asByteArray();
+          if (charSize < src.length) {
+            throw new ValueTooLongForTypeCharactersException(charSize);
+          }
+
+          System.arraycopy(src, 0, _char, 0, src.length);
+          bb.put(_char);
           break;
         case INT2:
           bb.putShort(tuple.get(i).asInt2());
@@ -281,9 +291,11 @@ public class RowStoreUtil {
         switch (col.getDataType().getType()) {
         case BOOLEAN:
         case BIT:
-        case CHAR:
           size += 1;
           break;
+        case CHAR:
+          size += col.getDataType().getLength();
+          break;
         case INT2:
           size += 2;
           break;

http://git-wip-us.apache.org/repos/asf/tajo/blob/d3ca4bc1/tajo-storage/tajo-storage-common/src/main/java/org/apache/tajo/storage/TextSerializerDeserializer.java
----------------------------------------------------------------------
diff --git a/tajo-storage/tajo-storage-common/src/main/java/org/apache/tajo/storage/TextSerializerDeserializer.java b/tajo-storage/tajo-storage-common/src/main/java/org/apache/tajo/storage/TextSerializerDeserializer.java
index ab8816b..954b62d 100644
--- a/tajo-storage/tajo-storage-common/src/main/java/org/apache/tajo/storage/TextSerializerDeserializer.java
+++ b/tajo-storage/tajo-storage-common/src/main/java/org/apache/tajo/storage/TextSerializerDeserializer.java
@@ -20,12 +20,11 @@ package org.apache.tajo.storage;
 
 import com.google.protobuf.Message;
 import org.apache.commons.codec.binary.Base64;
-import org.apache.tajo.TajoConstants;
 import org.apache.tajo.catalog.Column;
 import org.apache.tajo.common.TajoDataTypes;
-import org.apache.tajo.conf.TajoConf;
 import org.apache.tajo.datum.*;
 import org.apache.tajo.datum.protobuf.ProtobufJsonFormat;
+import org.apache.tajo.exception.ValueTooLongForTypeCharactersException;
 import org.apache.tajo.util.Bytes;
 import org.apache.tajo.util.NumberUtil;
 
@@ -66,7 +65,12 @@ public class TextSerializerDeserializer implements SerializerDeserializer {
         length = trueBytes.length;
         break;
       case CHAR:
-        byte[] pad = new byte[dataType.getLength() - datum.size()];
+        int size = dataType.getLength() - datum.size();
+        if (size < 0){
+          throw new ValueTooLongForTypeCharactersException(dataType.getLength());
+        }
+
+        byte[] pad = new byte[size];
         bytes = datum.asTextBytes();
         out.write(bytes);
         out.write(pad);

http://git-wip-us.apache.org/repos/asf/tajo/blob/d3ca4bc1/tajo-storage/tajo-storage-hdfs/src/main/java/org/apache/tajo/storage/parquet/TajoWriteSupport.java
----------------------------------------------------------------------
diff --git a/tajo-storage/tajo-storage-hdfs/src/main/java/org/apache/tajo/storage/parquet/TajoWriteSupport.java b/tajo-storage/tajo-storage-hdfs/src/main/java/org/apache/tajo/storage/parquet/TajoWriteSupport.java
index e05aeaf..dd951e1 100644
--- a/tajo-storage/tajo-storage-hdfs/src/main/java/org/apache/tajo/storage/parquet/TajoWriteSupport.java
+++ b/tajo-storage/tajo-storage-hdfs/src/main/java/org/apache/tajo/storage/parquet/TajoWriteSupport.java
@@ -23,6 +23,7 @@ import org.apache.tajo.catalog.Column;
 import org.apache.tajo.catalog.Schema;
 import org.apache.tajo.common.TajoDataTypes;
 import org.apache.tajo.datum.Datum;
+import org.apache.tajo.exception.ValueTooLongForTypeCharactersException;
 import org.apache.tajo.storage.Tuple;
 import parquet.hadoop.api.WriteSupport;
 import parquet.io.api.Binary;
@@ -132,6 +133,12 @@ public class TajoWriteSupport extends WriteSupport<Tuple> {
         recordConsumer.addDouble(datum.asFloat8());
         break;
       case CHAR:
+        if (datum.size() > column.getDataType().getLength()) {
+          throw new ValueTooLongForTypeCharactersException(column.getDataType().getLength());
+        }
+
+        recordConsumer.addBinary(Binary.fromByteArray(datum.asTextBytes()));
+        break;
       case TEXT:
         recordConsumer.addBinary(Binary.fromByteArray(datum.asTextBytes()));
         break;

http://git-wip-us.apache.org/repos/asf/tajo/blob/d3ca4bc1/tajo-storage/tajo-storage-hdfs/src/main/java/org/apache/tajo/storage/text/TextFieldSerializerDeserializer.java
----------------------------------------------------------------------
diff --git a/tajo-storage/tajo-storage-hdfs/src/main/java/org/apache/tajo/storage/text/TextFieldSerializerDeserializer.java b/tajo-storage/tajo-storage-hdfs/src/main/java/org/apache/tajo/storage/text/TextFieldSerializerDeserializer.java
index e637c7f..d2eee9f 100644
--- a/tajo-storage/tajo-storage-hdfs/src/main/java/org/apache/tajo/storage/text/TextFieldSerializerDeserializer.java
+++ b/tajo-storage/tajo-storage-hdfs/src/main/java/org/apache/tajo/storage/text/TextFieldSerializerDeserializer.java
@@ -28,6 +28,7 @@ import org.apache.tajo.catalog.TableMeta;
 import org.apache.tajo.common.TajoDataTypes;
 import org.apache.tajo.datum.*;
 import org.apache.tajo.datum.protobuf.ProtobufJsonFormat;
+import org.apache.tajo.exception.ValueTooLongForTypeCharactersException;
 import org.apache.tajo.storage.FieldSerializerDeserializer;
 import org.apache.tajo.storage.StorageConstants;
 import org.apache.tajo.util.Bytes;
@@ -86,7 +87,12 @@ public class TextFieldSerializerDeserializer implements FieldSerializerDeseriali
         length = trueBytes.length;
         break;
       case CHAR:
-        byte[] pad = new byte[dataType.getLength() - datum.size()];
+        int size = dataType.getLength() - datum.size();
+        if (size < 0){
+          throw new ValueTooLongForTypeCharactersException(dataType.getLength());
+        }
+
+        byte[] pad = new byte[size];
         bytes = datum.asTextBytes();
         out.write(bytes);
         out.write(pad);

http://git-wip-us.apache.org/repos/asf/tajo/blob/d3ca4bc1/tajo-storage/tajo-storage-hdfs/src/test/java/org/apache/tajo/storage/TestStorages.java
----------------------------------------------------------------------
diff --git a/tajo-storage/tajo-storage-hdfs/src/test/java/org/apache/tajo/storage/TestStorages.java b/tajo-storage/tajo-storage-hdfs/src/test/java/org/apache/tajo/storage/TestStorages.java
index 949e2e8..6a0080c 100644
--- a/tajo-storage/tajo-storage-hdfs/src/test/java/org/apache/tajo/storage/TestStorages.java
+++ b/tajo-storage/tajo-storage-hdfs/src/test/java/org/apache/tajo/storage/TestStorages.java
@@ -30,7 +30,6 @@ import org.apache.tajo.TajoIdProtos;
 import org.apache.tajo.catalog.CatalogUtil;
 import org.apache.tajo.catalog.Schema;
 import org.apache.tajo.catalog.TableMeta;
-import org.apache.tajo.catalog.proto.CatalogProtos.StoreType;
 import org.apache.tajo.catalog.statistics.TableStats;
 import org.apache.tajo.common.TajoDataTypes.Type;
 import org.apache.tajo.conf.TajoConf;
@@ -38,6 +37,7 @@ import org.apache.tajo.datum.Datum;
 import org.apache.tajo.datum.DatumFactory;
 import org.apache.tajo.datum.NullDatum;
 import org.apache.tajo.datum.ProtobufDatumFactory;
+import org.apache.tajo.exception.ValueTooLongForTypeCharactersException;
 import org.apache.tajo.storage.fragment.FileFragment;
 import org.apache.tajo.storage.rcfile.RCFile;
 import org.apache.tajo.storage.sequencefile.SequenceFileScanner;
@@ -1023,4 +1023,50 @@ public class TestStorages {
       assertEquals(NullDatum.get(), tuple.get(4));
     }
   }
+
+  @Test
+  public final void testInsertFixedCharTypeWithOverSize() throws Exception {
+    if (storeType.equalsIgnoreCase("CSV") == false &&
+        storeType.equalsIgnoreCase("SEQUENCEFILE") == false &&
+        storeType.equalsIgnoreCase("RCFILE") == false &&
+        storeType.equalsIgnoreCase("PARQUET") == false) {
+      return;
+    }
+
+    Schema dataSchema = new Schema();
+    dataSchema.addColumn("col1", Type.CHAR);
+
+    KeyValueSet options = new KeyValueSet();
+    TableMeta meta = CatalogUtil.newTableMeta(storeType, options);
+    meta.setOptions(CatalogUtil.newPhysicalProperties(storeType));
+
+    Path tablePath = new Path(testDir, "test_storetype_oversize.data");
+    FileStorageManager sm = (FileStorageManager) StorageManager.getFileStorageManager(conf);
+    Appender appender = sm.getAppender(meta, dataSchema, tablePath);
+    appender.init();
+
+    Tuple expect = new VTuple(dataSchema.size());
+    expect.put(new Datum[]{
+        DatumFactory.createChar("1"),
+    });
+
+    appender.addTuple(expect);
+    appender.flush();
+
+    Tuple expect2 = new VTuple(dataSchema.size());
+    expect2.put(new Datum[]{
+        DatumFactory.createChar("12"),
+    });
+
+    boolean ok = false;
+    try {
+      appender.addTuple(expect2);
+      appender.flush();
+      appender.close();
+    } catch (ValueTooLongForTypeCharactersException e) {
+      ok = true;
+    }
+
+    assertTrue(ok);
+  }
 }


[08/10] tajo git commit: TAJO-1603: Refactor StorageManager. (hyunsik)

Posted by ji...@apache.org.
http://git-wip-us.apache.org/repos/asf/tajo/blob/5491f0e7/tajo-storage/tajo-storage-common/src/main/java/org/apache/tajo/storage/StorageManager.java
----------------------------------------------------------------------
diff --git a/tajo-storage/tajo-storage-common/src/main/java/org/apache/tajo/storage/StorageManager.java b/tajo-storage/tajo-storage-common/src/main/java/org/apache/tajo/storage/StorageManager.java
index 24d2dfa..0751035 100644
--- a/tajo-storage/tajo-storage-common/src/main/java/org/apache/tajo/storage/StorageManager.java
+++ b/tajo-storage/tajo-storage-common/src/main/java/org/apache/tajo/storage/StorageManager.java
@@ -18,33 +18,28 @@
 
 package org.apache.tajo.storage;
 
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.collect.Maps;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.*;
-import org.apache.tajo.*;
-import org.apache.tajo.catalog.*;
-import org.apache.tajo.catalog.proto.CatalogProtos;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.PathFilter;
+import org.apache.tajo.ExecutionBlockId;
+import org.apache.tajo.OverridableConf;
+import org.apache.tajo.TajoConstants;
+import org.apache.tajo.TaskAttemptId;
+import org.apache.tajo.catalog.Schema;
+import org.apache.tajo.catalog.SortSpec;
+import org.apache.tajo.catalog.TableDesc;
+import org.apache.tajo.catalog.TableMeta;
 import org.apache.tajo.catalog.proto.CatalogProtos.FragmentProto;
 import org.apache.tajo.conf.TajoConf;
 import org.apache.tajo.conf.TajoConf.ConfVars;
 import org.apache.tajo.plan.LogicalPlan;
 import org.apache.tajo.plan.logical.LogicalNode;
-import org.apache.tajo.plan.logical.NodeType;
 import org.apache.tajo.plan.logical.ScanNode;
 import org.apache.tajo.plan.rewrite.LogicalPlanRewriteRule;
 import org.apache.tajo.storage.fragment.Fragment;
 import org.apache.tajo.storage.fragment.FragmentConvertor;
-import org.apache.tajo.util.TUtil;
 
 import java.io.IOException;
-import java.lang.reflect.Constructor;
-import java.text.NumberFormat;
 import java.util.List;
-import java.util.Map;
-import java.util.concurrent.ConcurrentHashMap;
 
 /**
  * StorageManager manages the functions of storing and reading data.
@@ -52,23 +47,7 @@ import java.util.concurrent.ConcurrentHashMap;
  * For supporting such as HDFS, HBASE, a specific StorageManager should be implemented by inheriting this class.
  *
  */
-public abstract class StorageManager {
-  private final Log LOG = LogFactory.getLog(StorageManager.class);
-
-  private static final Class<?>[] DEFAULT_SCANNER_PARAMS = {
-      Configuration.class,
-      Schema.class,
-      TableMeta.class,
-      Fragment.class
-  };
-
-  private static final Class<?>[] DEFAULT_APPENDER_PARAMS = {
-      Configuration.class,
-      TaskAttemptId.class,
-      Schema.class,
-      TableMeta.class,
-      Path.class
-  };
+public abstract class StorageManager implements TableSpace {
 
   public static final PathFilter hiddenFileFilter = new PathFilter() {
     public boolean accept(Path p) {
@@ -80,31 +59,6 @@ public abstract class StorageManager {
   protected TajoConf conf;
   protected String storeType;
 
-  /**
-   * Cache of StorageManager.
-   * Key is manager key(warehouse path) + store type
-   */
-  private static final Map<String, StorageManager> storageManagers = Maps.newHashMap();
-
-  /**
-   * Cache of scanner handlers for each storage type.
-   */
-  protected static final Map<String, Class<? extends Scanner>> SCANNER_HANDLER_CACHE
-      = new ConcurrentHashMap<String, Class<? extends Scanner>>();
-
-  /**
-   * Cache of appender handlers for each storage type.
-   */
-  protected static final Map<String, Class<? extends Appender>> APPENDER_HANDLER_CACHE
-      = new ConcurrentHashMap<String, Class<? extends Appender>>();
-
-  /**
-   * Cache of constructors for each class. Pins the classes so they
-   * can't be garbage collected until ReflectionUtils can be collected.
-   */
-  private static final Map<Class<?>, Constructor<?>> CONSTRUCTOR_CACHE =
-      new ConcurrentHashMap<Class<?>, Constructor<?>>();
-
   public StorageManager(String storeType) {
     this.storeType = storeType;
   }
@@ -123,6 +77,7 @@ public abstract class StorageManager {
    * @param ifNotExists Creates the table only when the table does not exist.
    * @throws java.io.IOException
    */
+  @Override
   public abstract void createTable(TableDesc tableDesc, boolean ifNotExists) throws IOException;
 
   /**
@@ -132,6 +87,7 @@ public abstract class StorageManager {
    * @param tableDesc
    * @throws java.io.IOException
    */
+  @Override
   public abstract void purgeTable(TableDesc tableDesc) throws IOException;
 
   /**
@@ -143,6 +99,7 @@ public abstract class StorageManager {
    * @return The list of input fragments.
    * @throws java.io.IOException
    */
+  @Override
   public abstract List<Fragment> getSplits(String fragmentId, TableDesc tableDesc,
                                            ScanNode scanNode) throws IOException;
 
@@ -167,21 +124,11 @@ public abstract class StorageManager {
   /**
    * Release storage manager resource
    */
-  public abstract void closeStorageManager();
+  @Override
+  public abstract void close();
 
 
   /**
-   * Clear all class cache
-   */
-  @VisibleForTesting
-  protected synchronized static void clearCache() {
-    CONSTRUCTOR_CACHE.clear();
-    SCANNER_HANDLER_CACHE.clear();
-    APPENDER_HANDLER_CACHE.clear();
-    storageManagers.clear();
-  }
-
-  /**
    * It is called by a Repartitioner for range shuffling when the SortRangeType of SortNode is USING_STORAGE_MANAGER.
    * In general Repartitioner determines the partition range using previous output statistics data.
    * In the special cases, such as HBase Repartitioner uses the result of this method.
@@ -237,19 +184,6 @@ public abstract class StorageManager {
   }
 
   /**
-   * Close StorageManager
-   * @throws java.io.IOException
-   */
-  public static void close() throws IOException {
-    synchronized(storageManagers) {
-      for (StorageManager eachStorageManager: storageManagers.values()) {
-        eachStorageManager.closeStorageManager();
-      }
-    }
-    clearCache();
-  }
-
-  /**
    * Returns the splits that will serve as input for the scan tasks. The
    * number of splits matches the number of regions in a table.
    *
@@ -263,85 +197,6 @@ public abstract class StorageManager {
   }
 
   /**
-   * Returns FileStorageManager instance.
-   *
-   * @param tajoConf Tajo system property.
-   * @return
-   * @throws java.io.IOException
-   */
-  public static StorageManager getFileStorageManager(TajoConf tajoConf) throws IOException {
-    return getStorageManager(tajoConf, "CSV");
-  }
-
-  /**
-   * Returns the proper StorageManager instance according to the storeType.
-   *
-   * @param tajoConf Tajo system property.
-   * @param storeType Storage type
-   * @return
-   * @throws java.io.IOException
-   */
-  public static StorageManager getStorageManager(TajoConf tajoConf, String storeType) throws IOException {
-    FileSystem fileSystem = TajoConf.getWarehouseDir(tajoConf).getFileSystem(tajoConf);
-    if (fileSystem != null) {
-      return getStorageManager(tajoConf, storeType, fileSystem.getUri().toString());
-    } else {
-      return getStorageManager(tajoConf, storeType, null);
-    }
-  }
-
-  /**
-   * Returns the proper StorageManager instance according to the storeType
-   *
-   * @param tajoConf Tajo system property.
-   * @param storeType Storage type
-   * @param managerKey Key that can identify each storage manager(may be a path)
-   * @return
-   * @throws java.io.IOException
-   */
-  private static synchronized StorageManager getStorageManager (
-      TajoConf tajoConf, String storeType, String managerKey) throws IOException {
-
-    String typeName;
-    if (storeType.equalsIgnoreCase("HBASE")) {
-      typeName = "hbase";
-    } else {
-      typeName = "hdfs";
-    }
-
-    synchronized (storageManagers) {
-      String storeKey = typeName + "_" + managerKey;
-      StorageManager manager = storageManagers.get(storeKey);
-
-      if (manager == null) {
-        Class<? extends StorageManager> storageManagerClass =
-            tajoConf.getClass(String.format("tajo.storage.manager.%s.class", typeName), null, StorageManager.class);
-
-        if (storageManagerClass == null) {
-          throw new IOException("Unknown Storage Type: " + typeName);
-        }
-
-        try {
-          Constructor<? extends StorageManager> constructor =
-              (Constructor<? extends StorageManager>) CONSTRUCTOR_CACHE.get(storageManagerClass);
-          if (constructor == null) {
-            constructor = storageManagerClass.getDeclaredConstructor(new Class<?>[]{String.class});
-            constructor.setAccessible(true);
-            CONSTRUCTOR_CACHE.put(storageManagerClass, constructor);
-          }
-          manager = constructor.newInstance(new Object[]{storeType});
-        } catch (Exception e) {
-          throw new RuntimeException(e);
-        }
-        manager.init(tajoConf);
-        storageManagers.put(storeKey, manager);
-      }
-
-      return manager;
-    }
-  }
-
-  /**
    * Returns Scanner instance.
    *
    * @param meta The table meta
@@ -351,6 +206,7 @@ public abstract class StorageManager {
    * @return Scanner instance
    * @throws java.io.IOException
    */
+  @Override
   public Scanner getScanner(TableMeta meta, Schema schema, FragmentProto fragment, Schema target) throws IOException {
     return getScanner(meta, schema, FragmentConvertor.convert(conf, fragment), target);
   }
@@ -364,6 +220,7 @@ public abstract class StorageManager {
    * @return Scanner instance
    * @throws java.io.IOException
    */
+  @Override
   public Scanner getScanner(TableMeta meta, Schema schema, Fragment fragment) throws IOException {
     return getScanner(meta, schema, fragment, schema);
   }
@@ -378,6 +235,7 @@ public abstract class StorageManager {
    * @return Scanner instance
    * @throws java.io.IOException
    */
+  @Override
   public Scanner getScanner(TableMeta meta, Schema schema, Fragment fragment, Schema target) throws IOException {
     if (fragment.isEmpty()) {
       Scanner scanner = new NullScanner(conf, schema, meta, fragment);
@@ -389,29 +247,13 @@ public abstract class StorageManager {
     Scanner scanner;
 
     Class<? extends Scanner> scannerClass = getScannerClass(meta.getStoreType());
-    scanner = newScannerInstance(scannerClass, conf, schema, meta, fragment);
+    scanner = TableSpaceManager.newScannerInstance(scannerClass, conf, schema, meta, fragment);
     scanner.setTarget(target.toArray());
 
     return scanner;
   }
 
   /**
-   * Returns Scanner instance.
-   *
-   * @param conf The system property
-   * @param meta The table meta
-   * @param schema The input schema
-   * @param fragment The fragment for scanning
-   * @param target The output schema
-   * @return Scanner instance
-   * @throws java.io.IOException
-   */
-  public static synchronized SeekableScanner getSeekableScanner(
-      TajoConf conf, TableMeta meta, Schema schema, Fragment fragment, Schema target) throws IOException {
-    return (SeekableScanner)getStorageManager(conf, meta.getStoreType()).getScanner(meta, schema, fragment, target);
-  }
-
-  /**
    * Returns Appender instance.
    * @param queryContext Query property.
    * @param taskAttemptId Task id.
@@ -429,82 +271,23 @@ public abstract class StorageManager {
     Class<? extends Appender> appenderClass;
 
     String handlerName = meta.getStoreType().toLowerCase();
-    appenderClass = APPENDER_HANDLER_CACHE.get(handlerName);
+    appenderClass = TableSpaceManager.APPENDER_HANDLER_CACHE.get(handlerName);
     if (appenderClass == null) {
       appenderClass = conf.getClass(
           String.format("tajo.storage.appender-handler.%s.class", handlerName), null, Appender.class);
-      APPENDER_HANDLER_CACHE.put(handlerName, appenderClass);
+      TableSpaceManager.APPENDER_HANDLER_CACHE.put(handlerName, appenderClass);
     }
 
     if (appenderClass == null) {
       throw new IOException("Unknown Storage Type: " + meta.getStoreType());
     }
 
-    appender = newAppenderInstance(appenderClass, conf, taskAttemptId, meta, schema, workDir);
+    appender = TableSpaceManager.newAppenderInstance(appenderClass, conf, taskAttemptId, meta, schema, workDir);
 
     return appender;
   }
 
   /**
-   * Creates a scanner instance.
-   *
-   * @param theClass Concrete class of scanner
-   * @param conf System property
-   * @param schema Input schema
-   * @param meta Table meta data
-   * @param fragment The fragment for scanning
-   * @param <T>
-   * @return The scanner instance
-   */
-  public static <T> T newScannerInstance(Class<T> theClass, Configuration conf, Schema schema, TableMeta meta,
-                                         Fragment fragment) {
-    T result;
-    try {
-      Constructor<T> meth = (Constructor<T>) CONSTRUCTOR_CACHE.get(theClass);
-      if (meth == null) {
-        meth = theClass.getDeclaredConstructor(DEFAULT_SCANNER_PARAMS);
-        meth.setAccessible(true);
-        CONSTRUCTOR_CACHE.put(theClass, meth);
-      }
-      result = meth.newInstance(new Object[]{conf, schema, meta, fragment});
-    } catch (Exception e) {
-      throw new RuntimeException(e);
-    }
-
-    return result;
-  }
-
-  /**
-   * Creates a scanner instance.
-   *
-   * @param theClass Concrete class of scanner
-   * @param conf System property
-   * @param taskAttemptId Task id
-   * @param meta Table meta data
-   * @param schema Input schema
-   * @param workDir Working directory
-   * @param <T>
-   * @return The scanner instance
-   */
-  public static <T> T newAppenderInstance(Class<T> theClass, Configuration conf, TaskAttemptId taskAttemptId,
-                                          TableMeta meta, Schema schema, Path workDir) {
-    T result;
-    try {
-      Constructor<T> meth = (Constructor<T>) CONSTRUCTOR_CACHE.get(theClass);
-      if (meth == null) {
-        meth = theClass.getDeclaredConstructor(DEFAULT_APPENDER_PARAMS);
-        meth.setAccessible(true);
-        CONSTRUCTOR_CACHE.put(theClass, meth);
-      }
-      result = meth.newInstance(new Object[]{conf, taskAttemptId, schema, meta, workDir});
-    } catch (Exception e) {
-      throw new RuntimeException(e);
-    }
-
-    return result;
-  }
-
-  /**
    * Return the Scanner class for the StoreType that is defined in storage-default.xml.
    *
    * @param storeType store type
@@ -513,11 +296,11 @@ public abstract class StorageManager {
    */
   public Class<? extends Scanner> getScannerClass(String storeType) throws IOException {
     String handlerName = storeType.toLowerCase();
-    Class<? extends Scanner> scannerClass = SCANNER_HANDLER_CACHE.get(handlerName);
+    Class<? extends Scanner> scannerClass = TableSpaceManager.SCANNER_HANDLER_CACHE.get(handlerName);
     if (scannerClass == null) {
       scannerClass = conf.getClass(
           String.format("tajo.storage.scanner-handler.%s.class", handlerName), null, Scanner.class);
-      SCANNER_HANDLER_CACHE.put(handlerName, scannerClass);
+      TableSpaceManager.SCANNER_HANDLER_CACHE.put(handlerName, scannerClass);
     }
 
     if (scannerClass == null) {
@@ -550,6 +333,7 @@ public abstract class StorageManager {
    * @param outSchema  The output schema of select query for inserting.
    * @throws java.io.IOException
    */
+  @Override
   public void verifyInsertTableSchema(TableDesc tableDesc, Schema outSchema) throws IOException {
     // nothing to do
   }
@@ -563,7 +347,9 @@ public abstract class StorageManager {
    * @return The list of storage specified rewrite rules
    * @throws java.io.IOException
    */
-  public List<LogicalPlanRewriteRule> getRewriteRules(OverridableConf queryContext, TableDesc tableDesc) throws IOException {
+  @Override
+  public List<LogicalPlanRewriteRule> getRewriteRules(OverridableConf queryContext, TableDesc tableDesc)
+      throws IOException {
     return null;
   }
 
@@ -580,375 +366,8 @@ public abstract class StorageManager {
    * @return Saved path
    * @throws java.io.IOException
    */
-  public Path commitOutputData(OverridableConf queryContext, ExecutionBlockId finalEbId,
+  @Override
+  public abstract Path commitOutputData(OverridableConf queryContext, ExecutionBlockId finalEbId,
                                LogicalPlan plan, Schema schema,
-                               TableDesc tableDesc) throws IOException {
-    return commitOutputData(queryContext, finalEbId, plan, schema, tableDesc, true);
-  }
-
-  /**
-   * Finalizes result data. Tajo stores result data in the staging directory.
-   * If the query fails, clean up the staging directory.
-   * Otherwise the query is successful, move to the final directory from the staging directory.
-   *
-   * @param queryContext The query property
-   * @param finalEbId The final execution block id
-   * @param plan The query plan
-   * @param schema The final output schema
-   * @param tableDesc The description of the target table
-   * @param changeFileSeq If true change result file name with max sequence.
-   * @return Saved path
-   * @throws java.io.IOException
-   */
-  protected Path commitOutputData(OverridableConf queryContext, ExecutionBlockId finalEbId,
-                               LogicalPlan plan, Schema schema,
-                               TableDesc tableDesc, boolean changeFileSeq) throws IOException {
-    Path stagingDir = new Path(queryContext.get(QueryVars.STAGING_DIR));
-    Path stagingResultDir = new Path(stagingDir, TajoConstants.RESULT_DIR_NAME);
-    Path finalOutputDir;
-    if (!queryContext.get(QueryVars.OUTPUT_TABLE_PATH, "").isEmpty()) {
-      finalOutputDir = new Path(queryContext.get(QueryVars.OUTPUT_TABLE_PATH));
-      try {
-        FileSystem fs = stagingResultDir.getFileSystem(conf);
-
-        if (queryContext.getBool(QueryVars.OUTPUT_OVERWRITE, false)) { // INSERT OVERWRITE INTO
-
-          // It moves the original table into the temporary location.
-          // Then it moves the new result table into the original table location.
-          // Upon failed, it recovers the original table if possible.
-          boolean movedToOldTable = false;
-          boolean committed = false;
-          Path oldTableDir = new Path(stagingDir, TajoConstants.INSERT_OVERWIRTE_OLD_TABLE_NAME);
-          ContentSummary summary = fs.getContentSummary(stagingResultDir);
-
-          if (!queryContext.get(QueryVars.OUTPUT_PARTITIONS, "").isEmpty() && summary.getFileCount() > 0L) {
-            // This is a map for existing non-leaf directory to rename. A key is current directory and a value is
-            // renaming directory.
-            Map<Path, Path> renameDirs = TUtil.newHashMap();
-            // This is a map for recovering existing partition directory. A key is current directory and a value is
-            // temporary directory to back up.
-            Map<Path, Path> recoveryDirs = TUtil.newHashMap();
-
-            try {
-              if (!fs.exists(finalOutputDir)) {
-                fs.mkdirs(finalOutputDir);
-              }
-
-              visitPartitionedDirectory(fs, stagingResultDir, finalOutputDir, stagingResultDir.toString(),
-                  renameDirs, oldTableDir);
-
-              // Rename target partition directories
-              for(Map.Entry<Path, Path> entry : renameDirs.entrySet()) {
-                // Backup existing data files for recovering
-                if (fs.exists(entry.getValue())) {
-                  String recoveryPathString = entry.getValue().toString().replaceAll(finalOutputDir.toString(),
-                      oldTableDir.toString());
-                  Path recoveryPath = new Path(recoveryPathString);
-                  fs.rename(entry.getValue(), recoveryPath);
-                  fs.exists(recoveryPath);
-                  recoveryDirs.put(entry.getValue(), recoveryPath);
-                }
-                // Delete existing directory
-                fs.delete(entry.getValue(), true);
-                // Rename staging directory to final output directory
-                fs.rename(entry.getKey(), entry.getValue());
-              }
-
-            } catch (IOException ioe) {
-              // Remove created dirs
-              for(Map.Entry<Path, Path> entry : renameDirs.entrySet()) {
-                fs.delete(entry.getValue(), true);
-              }
-
-              // Recovery renamed dirs
-              for(Map.Entry<Path, Path> entry : recoveryDirs.entrySet()) {
-                fs.delete(entry.getValue(), true);
-                fs.rename(entry.getValue(), entry.getKey());
-              }
-
-              throw new IOException(ioe.getMessage());
-            }
-          } else { // no partition
-            try {
-
-              // if the final output dir exists, move all contents to the temporary table dir.
-              // Otherwise, just make the final output dir. As a result, the final output dir will be empty.
-              if (fs.exists(finalOutputDir)) {
-                fs.mkdirs(oldTableDir);
-
-                for (FileStatus status : fs.listStatus(finalOutputDir, StorageManager.hiddenFileFilter)) {
-                  fs.rename(status.getPath(), oldTableDir);
-                }
-
-                movedToOldTable = fs.exists(oldTableDir);
-              } else { // if the parent does not exist, make its parent directory.
-                fs.mkdirs(finalOutputDir);
-              }
-
-              // Move the results to the final output dir.
-              for (FileStatus status : fs.listStatus(stagingResultDir)) {
-                fs.rename(status.getPath(), finalOutputDir);
-              }
-
-              // Check the final output dir
-              committed = fs.exists(finalOutputDir);
-
-            } catch (IOException ioe) {
-              // recover the old table
-              if (movedToOldTable && !committed) {
-
-                // if commit is failed, recover the old data
-                for (FileStatus status : fs.listStatus(finalOutputDir, StorageManager.hiddenFileFilter)) {
-                  fs.delete(status.getPath(), true);
-                }
-
-                for (FileStatus status : fs.listStatus(oldTableDir)) {
-                  fs.rename(status.getPath(), finalOutputDir);
-                }
-              }
-
-              throw new IOException(ioe.getMessage());
-            }
-          }
-        } else {
-          String queryType = queryContext.get(QueryVars.COMMAND_TYPE);
-
-          if (queryType != null && queryType.equals(NodeType.INSERT.name())) { // INSERT INTO an existing table
-
-            NumberFormat fmt = NumberFormat.getInstance();
-            fmt.setGroupingUsed(false);
-            fmt.setMinimumIntegerDigits(3);
-
-            if (!queryContext.get(QueryVars.OUTPUT_PARTITIONS, "").isEmpty()) {
-              for(FileStatus eachFile: fs.listStatus(stagingResultDir)) {
-                if (eachFile.isFile()) {
-                  LOG.warn("Partition table can't have file in a staging dir: " + eachFile.getPath());
-                  continue;
-                }
-                moveResultFromStageToFinal(fs, stagingResultDir, eachFile, finalOutputDir, fmt, -1, changeFileSeq);
-              }
-            } else {
-              int maxSeq = StorageUtil.getMaxFileSequence(fs, finalOutputDir, false) + 1;
-              for(FileStatus eachFile: fs.listStatus(stagingResultDir)) {
-                if (eachFile.getPath().getName().startsWith("_")) {
-                  continue;
-                }
-                moveResultFromStageToFinal(fs, stagingResultDir, eachFile, finalOutputDir, fmt, maxSeq++, changeFileSeq);
-              }
-            }
-            // checking all file moved and remove empty dir
-            verifyAllFileMoved(fs, stagingResultDir);
-            FileStatus[] files = fs.listStatus(stagingResultDir);
-            if (files != null && files.length != 0) {
-              for (FileStatus eachFile: files) {
-                LOG.error("There are some unmoved files in staging dir:" + eachFile.getPath());
-              }
-            }
-          } else { // CREATE TABLE AS SELECT (CTAS)
-            if (fs.exists(finalOutputDir)) {
-              for (FileStatus status : fs.listStatus(stagingResultDir)) {
-                fs.rename(status.getPath(), finalOutputDir);
-              }
-            } else {
-              fs.rename(stagingResultDir, finalOutputDir);
-            }
-            LOG.info("Moved from the staging dir to the output directory '" + finalOutputDir);
-          }
-        }
-
-        // remove the staging directory if the final output dir is given.
-        Path stagingDirRoot = stagingDir.getParent();
-        fs.delete(stagingDirRoot, true);
-      } catch (Throwable t) {
-        LOG.error(t);
-        throw new IOException(t);
-      }
-    } else {
-      finalOutputDir = new Path(stagingDir, TajoConstants.RESULT_DIR_NAME);
-    }
-
-    return finalOutputDir;
-  }
-
-  /**
-   * Attach the sequence number to the output file name and than move the file into the final result path.
-   *
-   * @param fs FileSystem
-   * @param stagingResultDir The staging result dir
-   * @param fileStatus The file status
-   * @param finalOutputPath Final output path
-   * @param nf Number format
-   * @param fileSeq The sequence number
-   * @throws java.io.IOException
-   */
-  private void moveResultFromStageToFinal(FileSystem fs, Path stagingResultDir,
-                                          FileStatus fileStatus, Path finalOutputPath,
-                                          NumberFormat nf,
-                                          int fileSeq, boolean changeFileSeq) throws IOException {
-    if (fileStatus.isDirectory()) {
-      String subPath = extractSubPath(stagingResultDir, fileStatus.getPath());
-      if (subPath != null) {
-        Path finalSubPath = new Path(finalOutputPath, subPath);
-        if (!fs.exists(finalSubPath)) {
-          fs.mkdirs(finalSubPath);
-        }
-        int maxSeq = StorageUtil.getMaxFileSequence(fs, finalSubPath, false);
-        for (FileStatus eachFile : fs.listStatus(fileStatus.getPath())) {
-          if (eachFile.getPath().getName().startsWith("_")) {
-            continue;
-          }
-          moveResultFromStageToFinal(fs, stagingResultDir, eachFile, finalOutputPath, nf, ++maxSeq, changeFileSeq);
-        }
-      } else {
-        throw new IOException("Wrong staging dir:" + stagingResultDir + "," + fileStatus.getPath());
-      }
-    } else {
-      String subPath = extractSubPath(stagingResultDir, fileStatus.getPath());
-      if (subPath != null) {
-        Path finalSubPath = new Path(finalOutputPath, subPath);
-        if (changeFileSeq) {
-          finalSubPath = new Path(finalSubPath.getParent(), replaceFileNameSeq(finalSubPath, fileSeq, nf));
-        }
-        if (!fs.exists(finalSubPath.getParent())) {
-          fs.mkdirs(finalSubPath.getParent());
-        }
-        if (fs.exists(finalSubPath)) {
-          throw new IOException("Already exists data file:" + finalSubPath);
-        }
-        boolean success = fs.rename(fileStatus.getPath(), finalSubPath);
-        if (success) {
-          LOG.info("Moving staging file[" + fileStatus.getPath() + "] + " +
-              "to final output[" + finalSubPath + "]");
-        } else {
-          LOG.error("Can't move staging file[" + fileStatus.getPath() + "] + " +
-              "to final output[" + finalSubPath + "]");
-        }
-      }
-    }
-  }
-
-  /**
-   * Removes the path of the parent.
-   * @param parentPath
-   * @param childPath
-   * @return
-   */
-  private String extractSubPath(Path parentPath, Path childPath) {
-    String parentPathStr = parentPath.toUri().getPath();
-    String childPathStr = childPath.toUri().getPath();
-
-    if (parentPathStr.length() > childPathStr.length()) {
-      return null;
-    }
-
-    int index = childPathStr.indexOf(parentPathStr);
-    if (index != 0) {
-      return null;
-    }
-
-    return childPathStr.substring(parentPathStr.length() + 1);
-  }
-
-  /**
-   * Attach the sequence number to a path.
-   *
-   * @param path Path
-   * @param seq sequence number
-   * @param nf Number format
-   * @return New path attached with sequence number
-   * @throws java.io.IOException
-   */
-  private String replaceFileNameSeq(Path path, int seq, NumberFormat nf) throws IOException {
-    String[] tokens = path.getName().split("-");
-    if (tokens.length != 4) {
-      throw new IOException("Wrong result file name:" + path);
-    }
-    return tokens[0] + "-" + tokens[1] + "-" + tokens[2] + "-" + nf.format(seq);
-  }
-
-  /**
-   * Make sure all files are moved.
-   * @param fs FileSystem
-   * @param stagingPath The stagind directory
-   * @return
-   * @throws java.io.IOException
-   */
-  private boolean verifyAllFileMoved(FileSystem fs, Path stagingPath) throws IOException {
-    FileStatus[] files = fs.listStatus(stagingPath);
-    if (files != null && files.length != 0) {
-      for (FileStatus eachFile: files) {
-        if (eachFile.isFile()) {
-          LOG.error("There are some unmoved files in staging dir:" + eachFile.getPath());
-          return false;
-        } else {
-          if (verifyAllFileMoved(fs, eachFile.getPath())) {
-            fs.delete(eachFile.getPath(), false);
-          } else {
-            return false;
-          }
-        }
-      }
-    }
-
-    return true;
-  }
-
-  /**
-   * This method sets a rename map which includes renamed staging directory to final output directory recursively.
-   * If there exists some data files, this delete it for duplicate data.
-   *
-   *
-   * @param fs
-   * @param stagingPath
-   * @param outputPath
-   * @param stagingParentPathString
-   * @throws java.io.IOException
-   */
-  private void visitPartitionedDirectory(FileSystem fs, Path stagingPath, Path outputPath,
-                                         String stagingParentPathString,
-                                         Map<Path, Path> renameDirs, Path oldTableDir) throws IOException {
-    FileStatus[] files = fs.listStatus(stagingPath);
-
-    for(FileStatus eachFile : files) {
-      if (eachFile.isDirectory()) {
-        Path oldPath = eachFile.getPath();
-
-        // Make recover directory.
-        String recoverPathString = oldPath.toString().replaceAll(stagingParentPathString,
-            oldTableDir.toString());
-        Path recoveryPath = new Path(recoverPathString);
-        if (!fs.exists(recoveryPath)) {
-          fs.mkdirs(recoveryPath);
-        }
-
-        visitPartitionedDirectory(fs, eachFile.getPath(), outputPath, stagingParentPathString,
-            renameDirs, oldTableDir);
-        // Find last order partition for renaming
-        String newPathString = oldPath.toString().replaceAll(stagingParentPathString,
-            outputPath.toString());
-        Path newPath = new Path(newPathString);
-        if (!isLeafDirectory(fs, eachFile.getPath())) {
-          renameDirs.put(eachFile.getPath(), newPath);
-        } else {
-          if (!fs.exists(newPath)) {
-            fs.mkdirs(newPath);
-          }
-        }
-      }
-    }
-  }
-
-  private boolean isLeafDirectory(FileSystem fs, Path path) throws IOException {
-    boolean retValue = false;
-
-    FileStatus[] files = fs.listStatus(path);
-    for (FileStatus file : files) {
-      if (fs.isDirectory(file.getPath())) {
-        retValue = true;
-        break;
-      }
-    }
-
-    return retValue;
-  }
+                               TableDesc tableDesc) throws IOException;
 }

http://git-wip-us.apache.org/repos/asf/tajo/blob/5491f0e7/tajo-storage/tajo-storage-common/src/main/java/org/apache/tajo/storage/TableSpace.java
----------------------------------------------------------------------
diff --git a/tajo-storage/tajo-storage-common/src/main/java/org/apache/tajo/storage/TableSpace.java b/tajo-storage/tajo-storage-common/src/main/java/org/apache/tajo/storage/TableSpace.java
new file mode 100644
index 0000000..ef4aa9a
--- /dev/null
+++ b/tajo-storage/tajo-storage-common/src/main/java/org/apache/tajo/storage/TableSpace.java
@@ -0,0 +1,74 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.tajo.storage;
+
+import org.apache.hadoop.fs.Path;
+import org.apache.tajo.ExecutionBlockId;
+import org.apache.tajo.OverridableConf;
+import org.apache.tajo.catalog.Schema;
+import org.apache.tajo.catalog.TableDesc;
+import org.apache.tajo.catalog.TableMeta;
+import org.apache.tajo.catalog.proto.CatalogProtos;
+import org.apache.tajo.plan.LogicalPlan;
+import org.apache.tajo.plan.logical.ScanNode;
+import org.apache.tajo.plan.rewrite.LogicalPlanRewriteRule;
+import org.apache.tajo.storage.fragment.Fragment;
+
+import java.io.Closeable;
+import java.io.IOException;
+import java.util.List;
+
+/**
+ * It manages each tablespace; e.g., HDFS, Local file system, and Amazon S3.
+ */
+public interface TableSpace extends Closeable {
+  //public void format() throws IOException;
+
+  void createTable(TableDesc tableDesc, boolean ifNotExists) throws IOException;
+
+  void purgeTable(TableDesc tableDesc) throws IOException;
+
+  List<Fragment> getSplits(String fragmentId, TableDesc tableDesc, ScanNode scanNode) throws IOException;
+
+  List<Fragment> getSplits(String fragmentId, TableDesc tableDesc) throws IOException;
+
+//  public void renameTable() throws IOException;
+//
+//  public void truncateTable() throws IOException;
+//
+//  public long availableCapacity() throws IOException;
+//
+//  public long totalCapacity() throws IOException;
+
+  Scanner getScanner(TableMeta meta, Schema schema, CatalogProtos.FragmentProto fragment, Schema target) throws IOException;
+
+  Scanner getScanner(TableMeta meta, Schema schema, Fragment fragment) throws IOException;
+
+  Scanner getScanner(TableMeta meta, Schema schema, Fragment fragment, Schema target) throws IOException;
+
+  Path commitOutputData(OverridableConf queryContext, ExecutionBlockId finalEbId,
+                               LogicalPlan plan, Schema schema,
+                               TableDesc tableDesc) throws IOException;
+
+  void verifyInsertTableSchema(TableDesc tableDesc, Schema outSchema) throws IOException;
+
+  List<LogicalPlanRewriteRule> getRewriteRules(OverridableConf queryContext, TableDesc tableDesc) throws IOException;
+
+  void close() throws IOException;
+}

http://git-wip-us.apache.org/repos/asf/tajo/blob/5491f0e7/tajo-storage/tajo-storage-common/src/main/java/org/apache/tajo/storage/TableSpaceManager.java
----------------------------------------------------------------------
diff --git a/tajo-storage/tajo-storage-common/src/main/java/org/apache/tajo/storage/TableSpaceManager.java b/tajo-storage/tajo-storage-common/src/main/java/org/apache/tajo/storage/TableSpaceManager.java
new file mode 100644
index 0000000..42a5e07
--- /dev/null
+++ b/tajo-storage/tajo-storage-common/src/main/java/org/apache/tajo/storage/TableSpaceManager.java
@@ -0,0 +1,254 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.tajo.storage;
+
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.collect.Maps;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.tajo.TaskAttemptId;
+import org.apache.tajo.catalog.Schema;
+import org.apache.tajo.catalog.TableMeta;
+import org.apache.tajo.conf.TajoConf;
+import org.apache.tajo.storage.fragment.Fragment;
+
+import java.io.IOException;
+import java.lang.reflect.Constructor;
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
+
+/**
+ * It handles available table spaces and caches TableSpace instances.
+ */
+public class TableSpaceManager {
+
+  /**
+   * Cache of scanner handlers for each storage type.
+   */
+  protected static final Map<String, Class<? extends Scanner>> SCANNER_HANDLER_CACHE
+      = new ConcurrentHashMap<String, Class<? extends Scanner>>();
+  /**
+   * Cache of appender handlers for each storage type.
+   */
+  protected static final Map<String, Class<? extends Appender>> APPENDER_HANDLER_CACHE
+      = new ConcurrentHashMap<String, Class<? extends Appender>>();
+  private static final Class<?>[] DEFAULT_SCANNER_PARAMS = {
+      Configuration.class,
+      Schema.class,
+      TableMeta.class,
+      Fragment.class
+  };
+  private static final Class<?>[] DEFAULT_APPENDER_PARAMS = {
+      Configuration.class,
+      TaskAttemptId.class,
+      Schema.class,
+      TableMeta.class,
+      Path.class
+  };
+  /**
+   * Cache of StorageManager.
+   * Key is manager key (warehouse path) + store type.
+   */
+  private static final Map<String, StorageManager> storageManagers = Maps.newHashMap();
+  /**
+   * Cache of constructors for each class. Pins the classes so they
+   * can't be garbage collected until ReflectionUtils can be collected.
+   */
+  private static final Map<Class<?>, Constructor<?>> CONSTRUCTOR_CACHE =
+      new ConcurrentHashMap<Class<?>, Constructor<?>>();
+
+  /**
+   * Clear all class cache
+   */
+  @VisibleForTesting
+  protected synchronized static void clearCache() {
+    CONSTRUCTOR_CACHE.clear();
+    SCANNER_HANDLER_CACHE.clear();
+    APPENDER_HANDLER_CACHE.clear();
+    storageManagers.clear();
+  }
+
+  /**
+   * Close StorageManager
+   * @throws java.io.IOException
+   */
+  public static void shutdown() throws IOException {
+    synchronized(storageManagers) {
+      for (StorageManager eachStorageManager: storageManagers.values()) {
+        eachStorageManager.close();
+      }
+    }
+    clearCache();
+  }
+
+  /**
+   * Returns FileStorageManager instance.
+   *
+   * @param tajoConf Tajo system property.
+   * @return
+   * @throws IOException
+   */
+  public static StorageManager getFileStorageManager(TajoConf tajoConf) throws IOException {
+    return getStorageManager(tajoConf, "CSV");
+  }
+
+  /**
+   * Returns the proper StorageManager instance according to the storeType.
+   *
+   * @param tajoConf Tajo system property.
+   * @param storeType Storage type
+   * @return
+   * @throws IOException
+   */
+  public static StorageManager getStorageManager(TajoConf tajoConf, String storeType) throws IOException {
+    FileSystem fileSystem = TajoConf.getWarehouseDir(tajoConf).getFileSystem(tajoConf);
+    if (fileSystem != null) {
+      return getStorageManager(tajoConf, storeType, fileSystem.getUri().toString());
+    } else {
+      return getStorageManager(tajoConf, storeType, null);
+    }
+  }
+
+  /**
+   * Returns the proper StorageManager instance according to the storeType
+   *
+   * @param tajoConf Tajo system property.
+   * @param storeType Storage type
+   * @param managerKey Key that can identify each storage manager(may be a path)
+   * @return
+   * @throws IOException
+   */
+  private static synchronized StorageManager getStorageManager (
+      TajoConf tajoConf, String storeType, String managerKey) throws IOException {
+
+    String typeName;
+    if (storeType.equalsIgnoreCase("HBASE")) {
+      typeName = "hbase";
+    } else {
+      typeName = "hdfs";
+    }
+
+    synchronized (storageManagers) {
+      String storeKey = typeName + "_" + managerKey;
+      StorageManager manager = storageManagers.get(storeKey);
+
+      if (manager == null) {
+        Class<? extends StorageManager> storageManagerClass =
+            tajoConf.getClass(String.format("tajo.storage.manager.%s.class", typeName), null, StorageManager.class);
+
+        if (storageManagerClass == null) {
+          throw new IOException("Unknown Storage Type: " + typeName);
+        }
+
+        try {
+          Constructor<? extends StorageManager> constructor =
+              (Constructor<? extends StorageManager>) CONSTRUCTOR_CACHE.get(storageManagerClass);
+          if (constructor == null) {
+            constructor = storageManagerClass.getDeclaredConstructor(new Class<?>[]{String.class});
+            constructor.setAccessible(true);
+            CONSTRUCTOR_CACHE.put(storageManagerClass, constructor);
+          }
+          manager = constructor.newInstance(new Object[]{storeType});
+        } catch (Exception e) {
+          throw new RuntimeException(e);
+        }
+        manager.init(tajoConf);
+        storageManagers.put(storeKey, manager);
+      }
+
+      return manager;
+    }
+  }
+
+  /**
+   * Returns a SeekableScanner instance.
+   *
+   * @param conf The system property
+   * @param meta The table meta
+   * @param schema The input schema
+   * @param fragment The fragment for scanning
+   * @param target The output schema
+   * @return SeekableScanner instance
+   * @throws IOException
+   */
+  public static synchronized SeekableScanner getSeekableScanner(
+      TajoConf conf, TableMeta meta, Schema schema, Fragment fragment, Schema target) throws IOException {
+    return (SeekableScanner)getStorageManager(conf, meta.getStoreType()).getScanner(meta, schema, fragment, target);
+  }
+
+  /**
+   * Creates a scanner instance.
+   *
+   * @param theClass Concrete class of scanner
+   * @param conf System property
+   * @param schema Input schema
+   * @param meta Table meta data
+   * @param fragment The fragment for scanning
+   * @param <T>
+   * @return The scanner instance
+   */
+  public static <T> T newScannerInstance(Class<T> theClass, Configuration conf, Schema schema, TableMeta meta,
+                                         Fragment fragment) {
+    T result;
+    try {
+      Constructor<T> meth = (Constructor<T>) CONSTRUCTOR_CACHE.get(theClass);
+      if (meth == null) {
+        meth = theClass.getDeclaredConstructor(DEFAULT_SCANNER_PARAMS);
+        meth.setAccessible(true);
+        CONSTRUCTOR_CACHE.put(theClass, meth);
+      }
+      result = meth.newInstance(new Object[]{conf, schema, meta, fragment});
+    } catch (Exception e) {
+      throw new RuntimeException(e);
+    }
+
+    return result;
+  }
+
+  /**
+   * Creates an appender instance.
+   *
+   * @param theClass Concrete class of appender
+   * @param conf System property
+   * @param taskAttemptId Task id
+   * @param meta Table meta data
+   * @param schema Input schema
+   * @param workDir Working directory
+   * @param <T>
+   * @return The appender instance
+   */
+  public static <T> T newAppenderInstance(Class<T> theClass, Configuration conf, TaskAttemptId taskAttemptId,
+                                          TableMeta meta, Schema schema, Path workDir) {
+    T result;
+    try {
+      Constructor<T> meth = (Constructor<T>) CONSTRUCTOR_CACHE.get(theClass);
+      if (meth == null) {
+        meth = theClass.getDeclaredConstructor(DEFAULT_APPENDER_PARAMS);
+        meth.setAccessible(true);
+        CONSTRUCTOR_CACHE.put(theClass, meth);
+      }
+      result = meth.newInstance(new Object[]{conf, taskAttemptId, schema, meta, workDir});
+    } catch (Exception e) {
+      throw new RuntimeException(e);
+    }
+
+    return result;
+  }
+}

http://git-wip-us.apache.org/repos/asf/tajo/blob/5491f0e7/tajo-storage/tajo-storage-hbase/src/main/java/org/apache/tajo/storage/hbase/HBasePutAppender.java
----------------------------------------------------------------------
diff --git a/tajo-storage/tajo-storage-hbase/src/main/java/org/apache/tajo/storage/hbase/HBasePutAppender.java b/tajo-storage/tajo-storage-hbase/src/main/java/org/apache/tajo/storage/hbase/HBasePutAppender.java
index 7f3cb04..09a86b4 100644
--- a/tajo-storage/tajo-storage-hbase/src/main/java/org/apache/tajo/storage/hbase/HBasePutAppender.java
+++ b/tajo-storage/tajo-storage-hbase/src/main/java/org/apache/tajo/storage/hbase/HBasePutAppender.java
@@ -28,7 +28,7 @@ import org.apache.tajo.catalog.Schema;
 import org.apache.tajo.catalog.TableMeta;
 import org.apache.tajo.conf.TajoConf;
 import org.apache.tajo.datum.Datum;
-import org.apache.tajo.storage.StorageManager;
+import org.apache.tajo.storage.TableSpaceManager;
 import org.apache.tajo.storage.Tuple;
 
 import java.io.IOException;
@@ -47,7 +47,7 @@ public class HBasePutAppender extends AbstractHBaseAppender {
     super.init();
 
     Configuration hbaseConf = HBaseStorageManager.getHBaseConfiguration(conf, meta);
-    HConnection hconn = ((HBaseStorageManager) StorageManager.getStorageManager((TajoConf)conf, "HBASE"))
+    HConnection hconn = ((HBaseStorageManager) TableSpaceManager.getStorageManager((TajoConf) conf, "HBASE"))
         .getConnection(hbaseConf);
     htable = hconn.getTable(columnMapping.getHbaseTableName());
     htable.setAutoFlushTo(false);

http://git-wip-us.apache.org/repos/asf/tajo/blob/5491f0e7/tajo-storage/tajo-storage-hbase/src/main/java/org/apache/tajo/storage/hbase/HBaseScanner.java
----------------------------------------------------------------------
diff --git a/tajo-storage/tajo-storage-hbase/src/main/java/org/apache/tajo/storage/hbase/HBaseScanner.java b/tajo-storage/tajo-storage-hbase/src/main/java/org/apache/tajo/storage/hbase/HBaseScanner.java
index df60bb3..24bfd4d 100644
--- a/tajo-storage/tajo-storage-hbase/src/main/java/org/apache/tajo/storage/hbase/HBaseScanner.java
+++ b/tajo-storage/tajo-storage-hbase/src/main/java/org/apache/tajo/storage/hbase/HBaseScanner.java
@@ -36,10 +36,7 @@ import org.apache.tajo.conf.TajoConf;
 import org.apache.tajo.datum.Datum;
 import org.apache.tajo.datum.NullDatum;
 import org.apache.tajo.datum.TextDatum;
-import org.apache.tajo.storage.Scanner;
-import org.apache.tajo.storage.StorageManager;
-import org.apache.tajo.storage.Tuple;
-import org.apache.tajo.storage.VTuple;
+import org.apache.tajo.storage.*;
 import org.apache.tajo.storage.fragment.Fragment;
 import org.apache.tajo.util.BytesUtils;
 
@@ -184,7 +181,7 @@ public class HBaseScanner implements Scanner {
     }
 
     if (htable == null) {
-      HConnection hconn = ((HBaseStorageManager)StorageManager.getStorageManager(conf, "HBASE"))
+      HConnection hconn = ((HBaseStorageManager) TableSpaceManager.getStorageManager(conf, "HBASE"))
           .getConnection(hbaseConf);
       htable = hconn.getTable(fragment.getHbaseTableName());
     }

http://git-wip-us.apache.org/repos/asf/tajo/blob/5491f0e7/tajo-storage/tajo-storage-hbase/src/main/java/org/apache/tajo/storage/hbase/HBaseStorageManager.java
----------------------------------------------------------------------
diff --git a/tajo-storage/tajo-storage-hbase/src/main/java/org/apache/tajo/storage/hbase/HBaseStorageManager.java b/tajo-storage/tajo-storage-hbase/src/main/java/org/apache/tajo/storage/hbase/HBaseStorageManager.java
index 5f0695c..3653574 100644
--- a/tajo-storage/tajo-storage-hbase/src/main/java/org/apache/tajo/storage/hbase/HBaseStorageManager.java
+++ b/tajo-storage/tajo-storage-hbase/src/main/java/org/apache/tajo/storage/hbase/HBaseStorageManager.java
@@ -18,6 +18,7 @@
 
 package org.apache.tajo.storage.hbase;
 
+import com.google.common.base.Preconditions;
 import com.google.common.collect.Sets;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -42,6 +43,7 @@ import org.apache.tajo.common.TajoDataTypes.Type;
 import org.apache.tajo.conf.TajoConf;
 import org.apache.tajo.datum.Datum;
 import org.apache.tajo.datum.TextDatum;
+import org.apache.tajo.exception.UnimplementedException;
 import org.apache.tajo.plan.LogicalPlan;
 import org.apache.tajo.plan.expr.*;
 import org.apache.tajo.plan.logical.CreateTableNode;
@@ -78,7 +80,7 @@ public class HBaseStorageManager extends StorageManager {
   }
 
   @Override
-  public void closeStorageManager() {
+  public void close() {
     synchronized (connMap) {
       for (HConnection eachConn: connMap.values()) {
         try {
@@ -942,6 +944,8 @@ public class HBaseStorageManager extends StorageManager {
     if (tableDesc == null) {
       throw new IOException("TableDesc is null while calling loadIncrementalHFiles: " + finalEbId);
     }
+    Preconditions.checkArgument(tableDesc.getName() != null && tableDesc.getPath() == null);
+
     Path stagingDir = new Path(queryContext.get(QueryVars.STAGING_DIR));
     Path stagingResultDir = new Path(stagingDir, TajoConstants.RESULT_DIR_NAME);
 
@@ -960,29 +964,23 @@ public class HBaseStorageManager extends StorageManager {
     }
     committer.commitJob(jobContext);
 
-    if (tableDesc.getName() == null && tableDesc.getPath() != null) {
-
-      // insert into location
-      return super.commitOutputData(queryContext, finalEbId, plan, schema, tableDesc, false);
-    } else {
-      // insert into table
-      String tableName = tableDesc.getMeta().getOption(HBaseStorageConstants.META_TABLE_KEY);
+    // insert into table
+    String tableName = tableDesc.getMeta().getOption(HBaseStorageConstants.META_TABLE_KEY);
 
-      HTable htable = new HTable(hbaseConf, tableName);
+    HTable htable = new HTable(hbaseConf, tableName);
+    try {
+      LoadIncrementalHFiles loadIncrementalHFiles = null;
       try {
-        LoadIncrementalHFiles loadIncrementalHFiles = null;
-        try {
-          loadIncrementalHFiles = new LoadIncrementalHFiles(hbaseConf);
-        } catch (Exception e) {
-          LOG.error(e.getMessage(), e);
-          throw new IOException(e.getMessage(), e);
-        }
-        loadIncrementalHFiles.doBulkLoad(stagingResultDir, htable);
-
-        return stagingResultDir;
-      } finally {
-        htable.close();
+        loadIncrementalHFiles = new LoadIncrementalHFiles(hbaseConf);
+      } catch (Exception e) {
+        LOG.error(e.getMessage(), e);
+        throw new IOException(e.getMessage(), e);
       }
+      loadIncrementalHFiles.doBulkLoad(stagingResultDir, htable);
+
+      return stagingResultDir;
+    } finally {
+      htable.close();
     }
   }
 

http://git-wip-us.apache.org/repos/asf/tajo/blob/5491f0e7/tajo-storage/tajo-storage-hbase/src/test/java/org/apache/tajo/storage/hbase/TestHBaseStorageManager.java
----------------------------------------------------------------------
diff --git a/tajo-storage/tajo-storage-hbase/src/test/java/org/apache/tajo/storage/hbase/TestHBaseStorageManager.java b/tajo-storage/tajo-storage-hbase/src/test/java/org/apache/tajo/storage/hbase/TestHBaseStorageManager.java
index aa7aa28..39ccf44 100644
--- a/tajo-storage/tajo-storage-hbase/src/test/java/org/apache/tajo/storage/hbase/TestHBaseStorageManager.java
+++ b/tajo-storage/tajo-storage-hbase/src/test/java/org/apache/tajo/storage/hbase/TestHBaseStorageManager.java
@@ -19,14 +19,13 @@
 package org.apache.tajo.storage.hbase;
 
 import org.apache.tajo.catalog.Column;
-import org.apache.tajo.catalog.proto.CatalogProtos.StoreType;
 import org.apache.tajo.common.TajoDataTypes.Type;
 import org.apache.tajo.conf.TajoConf;
 import org.apache.tajo.datum.Datum;
 import org.apache.tajo.datum.TextDatum;
 import org.apache.tajo.plan.expr.*;
 import org.apache.tajo.plan.logical.ScanNode;
-import org.apache.tajo.storage.StorageManager;
+import org.apache.tajo.storage.TableSpaceManager;
 import org.apache.tajo.util.Pair;
 import org.junit.Test;
 
@@ -48,7 +47,7 @@ public class TestHBaseStorageManager {
     scanNode.setQual(evalNodeA);
 
     HBaseStorageManager storageManager =
-        (HBaseStorageManager) StorageManager.getStorageManager(new TajoConf(), "HBASE");
+        (HBaseStorageManager) TableSpaceManager.getStorageManager(new TajoConf(), "HBASE");
     List<Set<EvalNode>> indexEvals = storageManager.findIndexablePredicateSet(scanNode, new Column[]{rowkeyColumn});
     assertNotNull(indexEvals);
     assertEquals(1, indexEvals.size());

http://git-wip-us.apache.org/repos/asf/tajo/blob/5491f0e7/tajo-storage/tajo-storage-hdfs/src/main/java/org/apache/tajo/storage/FileAppender.java
----------------------------------------------------------------------
diff --git a/tajo-storage/tajo-storage-hdfs/src/main/java/org/apache/tajo/storage/FileAppender.java b/tajo-storage/tajo-storage-hdfs/src/main/java/org/apache/tajo/storage/FileAppender.java
index 3daed96..c041771 100644
--- a/tajo-storage/tajo-storage-hdfs/src/main/java/org/apache/tajo/storage/FileAppender.java
+++ b/tajo-storage/tajo-storage-hdfs/src/main/java/org/apache/tajo/storage/FileAppender.java
@@ -57,7 +57,7 @@ public abstract class FileAppender implements Appender {
           throw new IllegalArgumentException("Configuration must be an instance of TajoConf");
         }
 
-        this.path = ((FileStorageManager)StorageManager.getFileStorageManager((TajoConf) conf))
+        this.path = ((FileStorageManager) TableSpaceManager.getFileStorageManager((TajoConf) conf))
             .getAppenderFilePath(taskAttemptId, workDir);
       } else {
         this.path = workDir;

http://git-wip-us.apache.org/repos/asf/tajo/blob/5491f0e7/tajo-storage/tajo-storage-hdfs/src/main/java/org/apache/tajo/storage/FileStorageManager.java
----------------------------------------------------------------------
diff --git a/tajo-storage/tajo-storage-hdfs/src/main/java/org/apache/tajo/storage/FileStorageManager.java b/tajo-storage/tajo-storage-hdfs/src/main/java/org/apache/tajo/storage/FileStorageManager.java
index 635dade..4efc3b7 100644
--- a/tajo-storage/tajo-storage-hdfs/src/main/java/org/apache/tajo/storage/FileStorageManager.java
+++ b/tajo-storage/tajo-storage-hdfs/src/main/java/org/apache/tajo/storage/FileStorageManager.java
@@ -27,17 +27,18 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.*;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
-import org.apache.tajo.OverridableConf;
-import org.apache.tajo.TajoConstants;
-import org.apache.tajo.TaskAttemptId;
+import org.apache.tajo.*;
 import org.apache.tajo.catalog.*;
 import org.apache.tajo.catalog.statistics.TableStats;
 import org.apache.tajo.conf.TajoConf;
+import org.apache.tajo.plan.LogicalPlan;
 import org.apache.tajo.plan.logical.LogicalNode;
+import org.apache.tajo.plan.logical.NodeType;
 import org.apache.tajo.plan.logical.ScanNode;
 import org.apache.tajo.storage.fragment.FileFragment;
 import org.apache.tajo.storage.fragment.Fragment;
 import org.apache.tajo.util.Bytes;
+import org.apache.tajo.util.TUtil;
 
 import java.io.IOException;
 import java.text.NumberFormat;
@@ -864,7 +865,7 @@ public class FileStorageManager extends StorageManager {
   }
 
   @Override
-  public void closeStorageManager() {
+  public void close() {
   }
 
   @Override
@@ -876,6 +877,12 @@ public class FileStorageManager extends StorageManager {
   }
 
   @Override
+  public Path commitOutputData(OverridableConf queryContext, ExecutionBlockId finalEbId, LogicalPlan plan,
+                               Schema schema, TableDesc tableDesc) throws IOException {
+    return commitOutputData(queryContext, true);
+  }
+
+  @Override
   public TupleRange[] getInsertSortRanges(OverridableConf queryContext, TableDesc tableDesc,
                                           Schema inputSchema, SortSpec[] sortSpecs, TupleRange dataRange)
       throws IOException {
@@ -899,6 +906,366 @@ public class FileStorageManager extends StorageManager {
     FileStatus status = fs.getFileStatus(path);
     FileFragment fragment = new FileFragment(path.getName(), path, 0, status.getLen());
 
-    return getSeekableScanner(conf, meta, schema, fragment, schema);
+    return TableSpaceManager.getSeekableScanner(conf, meta, schema, fragment, schema);
+  }
+
+  /**
+   * Finalizes result data. Tajo stores result data in the staging directory.
+   * If the query fails, clean up the staging directory.
+   * Otherwise the query is successful, move to the final directory from the staging directory.
+   *
+   * @param queryContext The query property
+   * @param changeFileSeq If true change result file name with max sequence.
+   * @return Saved path
+   * @throws java.io.IOException
+   */
+  protected Path commitOutputData(OverridableConf queryContext, boolean changeFileSeq) throws IOException {
+    Path stagingDir = new Path(queryContext.get(QueryVars.STAGING_DIR));
+    Path stagingResultDir = new Path(stagingDir, TajoConstants.RESULT_DIR_NAME);
+    Path finalOutputDir;
+    if (!queryContext.get(QueryVars.OUTPUT_TABLE_PATH, "").isEmpty()) {
+      finalOutputDir = new Path(queryContext.get(QueryVars.OUTPUT_TABLE_PATH));
+      try {
+        FileSystem fs = stagingResultDir.getFileSystem(conf);
+
+        if (queryContext.getBool(QueryVars.OUTPUT_OVERWRITE, false)) { // INSERT OVERWRITE INTO
+
+          // It moves the original table into the temporary location.
+          // Then it moves the new result table into the original table location.
+          // Upon failed, it recovers the original table if possible.
+          boolean movedToOldTable = false;
+          boolean committed = false;
+          Path oldTableDir = new Path(stagingDir, TajoConstants.INSERT_OVERWIRTE_OLD_TABLE_NAME);
+          ContentSummary summary = fs.getContentSummary(stagingResultDir);
+
+          if (!queryContext.get(QueryVars.OUTPUT_PARTITIONS, "").isEmpty() && summary.getFileCount() > 0L) {
+            // This is a map for existing non-leaf directory to rename. A key is current directory and a value is
+            // renaming directory.
+            Map<Path, Path> renameDirs = TUtil.newHashMap();
+            // This is a map for recovering existing partition directory. A key is current directory and a value is
+            // temporary directory to back up.
+            Map<Path, Path> recoveryDirs = TUtil.newHashMap();
+
+            try {
+              if (!fs.exists(finalOutputDir)) {
+                fs.mkdirs(finalOutputDir);
+              }
+
+              visitPartitionedDirectory(fs, stagingResultDir, finalOutputDir, stagingResultDir.toString(),
+                  renameDirs, oldTableDir);
+
+              // Rename target partition directories
+              for(Map.Entry<Path, Path> entry : renameDirs.entrySet()) {
+                // Backup existing data files for recovering
+                if (fs.exists(entry.getValue())) {
+                  String recoveryPathString = entry.getValue().toString().replaceAll(finalOutputDir.toString(),
+                      oldTableDir.toString());
+                  Path recoveryPath = new Path(recoveryPathString);
+                  fs.rename(entry.getValue(), recoveryPath);
+                  fs.exists(recoveryPath);
+                  recoveryDirs.put(entry.getValue(), recoveryPath);
+                }
+                // Delete existing directory
+                fs.delete(entry.getValue(), true);
+                // Rename staging directory to final output directory
+                fs.rename(entry.getKey(), entry.getValue());
+              }
+
+            } catch (IOException ioe) {
+              // Remove created dirs
+              for(Map.Entry<Path, Path> entry : renameDirs.entrySet()) {
+                fs.delete(entry.getValue(), true);
+              }
+
+              // Recovery renamed dirs
+              for(Map.Entry<Path, Path> entry : recoveryDirs.entrySet()) {
+                fs.delete(entry.getValue(), true);
+                fs.rename(entry.getValue(), entry.getKey());
+              }
+
+              throw new IOException(ioe.getMessage());
+            }
+          } else { // no partition
+            try {
+
+              // if the final output dir exists, move all contents to the temporary table dir.
+              // Otherwise, just make the final output dir. As a result, the final output dir will be empty.
+              if (fs.exists(finalOutputDir)) {
+                fs.mkdirs(oldTableDir);
+
+                for (FileStatus status : fs.listStatus(finalOutputDir, StorageManager.hiddenFileFilter)) {
+                  fs.rename(status.getPath(), oldTableDir);
+                }
+
+                movedToOldTable = fs.exists(oldTableDir);
+              } else { // if the parent does not exist, make its parent directory.
+                fs.mkdirs(finalOutputDir);
+              }
+
+              // Move the results to the final output dir.
+              for (FileStatus status : fs.listStatus(stagingResultDir)) {
+                fs.rename(status.getPath(), finalOutputDir);
+              }
+
+              // Check the final output dir
+              committed = fs.exists(finalOutputDir);
+
+            } catch (IOException ioe) {
+              // recover the old table
+              if (movedToOldTable && !committed) {
+
+                // if commit is failed, recover the old data
+                for (FileStatus status : fs.listStatus(finalOutputDir, StorageManager.hiddenFileFilter)) {
+                  fs.delete(status.getPath(), true);
+                }
+
+                for (FileStatus status : fs.listStatus(oldTableDir)) {
+                  fs.rename(status.getPath(), finalOutputDir);
+                }
+              }
+
+              throw new IOException(ioe.getMessage());
+            }
+          }
+        } else {
+          String queryType = queryContext.get(QueryVars.COMMAND_TYPE);
+
+          if (queryType != null && queryType.equals(NodeType.INSERT.name())) { // INSERT INTO an existing table
+
+            NumberFormat fmt = NumberFormat.getInstance();
+            fmt.setGroupingUsed(false);
+            fmt.setMinimumIntegerDigits(3);
+
+            if (!queryContext.get(QueryVars.OUTPUT_PARTITIONS, "").isEmpty()) {
+              for(FileStatus eachFile: fs.listStatus(stagingResultDir)) {
+                if (eachFile.isFile()) {
+                  LOG.warn("Partition table can't have file in a staging dir: " + eachFile.getPath());
+                  continue;
+                }
+                moveResultFromStageToFinal(fs, stagingResultDir, eachFile, finalOutputDir, fmt, -1, changeFileSeq);
+              }
+            } else {
+              int maxSeq = StorageUtil.getMaxFileSequence(fs, finalOutputDir, false) + 1;
+              for(FileStatus eachFile: fs.listStatus(stagingResultDir)) {
+                if (eachFile.getPath().getName().startsWith("_")) {
+                  continue;
+                }
+                moveResultFromStageToFinal(fs, stagingResultDir, eachFile, finalOutputDir, fmt, maxSeq++, changeFileSeq);
+              }
+            }
+            // checking all file moved and remove empty dir
+            verifyAllFileMoved(fs, stagingResultDir);
+            FileStatus[] files = fs.listStatus(stagingResultDir);
+            if (files != null && files.length != 0) {
+              for (FileStatus eachFile: files) {
+                LOG.error("There are some unmoved files in staging dir:" + eachFile.getPath());
+              }
+            }
+          } else { // CREATE TABLE AS SELECT (CTAS)
+            if (fs.exists(finalOutputDir)) {
+              for (FileStatus status : fs.listStatus(stagingResultDir)) {
+                fs.rename(status.getPath(), finalOutputDir);
+              }
+            } else {
+              fs.rename(stagingResultDir, finalOutputDir);
+            }
+            LOG.info("Moved from the staging dir to the output directory '" + finalOutputDir);
+          }
+        }
+
+        // remove the staging directory if the final output dir is given.
+        Path stagingDirRoot = stagingDir.getParent();
+        fs.delete(stagingDirRoot, true);
+      } catch (Throwable t) {
+        LOG.error(t);
+        throw new IOException(t);
+      }
+    } else {
+      finalOutputDir = new Path(stagingDir, TajoConstants.RESULT_DIR_NAME);
+    }
+
+    return finalOutputDir;
+  }
+
+  /**
+   * Attach the sequence number to the output file name and than move the file into the final result path.
+   *
+   * @param fs FileSystem
+   * @param stagingResultDir The staging result dir
+   * @param fileStatus The file status
+   * @param finalOutputPath Final output path
+   * @param nf Number format
+   * @param fileSeq The sequence number
+   * @throws java.io.IOException
+   */
+  private void moveResultFromStageToFinal(FileSystem fs, Path stagingResultDir,
+                                          FileStatus fileStatus, Path finalOutputPath,
+                                          NumberFormat nf,
+                                          int fileSeq, boolean changeFileSeq) throws IOException {
+    if (fileStatus.isDirectory()) {
+      String subPath = extractSubPath(stagingResultDir, fileStatus.getPath());
+      if (subPath != null) {
+        Path finalSubPath = new Path(finalOutputPath, subPath);
+        if (!fs.exists(finalSubPath)) {
+          fs.mkdirs(finalSubPath);
+        }
+        int maxSeq = StorageUtil.getMaxFileSequence(fs, finalSubPath, false);
+        for (FileStatus eachFile : fs.listStatus(fileStatus.getPath())) {
+          if (eachFile.getPath().getName().startsWith("_")) {
+            continue;
+          }
+          moveResultFromStageToFinal(fs, stagingResultDir, eachFile, finalOutputPath, nf, ++maxSeq, changeFileSeq);
+        }
+      } else {
+        throw new IOException("Wrong staging dir:" + stagingResultDir + "," + fileStatus.getPath());
+      }
+    } else {
+      String subPath = extractSubPath(stagingResultDir, fileStatus.getPath());
+      if (subPath != null) {
+        Path finalSubPath = new Path(finalOutputPath, subPath);
+        if (changeFileSeq) {
+          finalSubPath = new Path(finalSubPath.getParent(), replaceFileNameSeq(finalSubPath, fileSeq, nf));
+        }
+        if (!fs.exists(finalSubPath.getParent())) {
+          fs.mkdirs(finalSubPath.getParent());
+        }
+        if (fs.exists(finalSubPath)) {
+          throw new IOException("Already exists data file:" + finalSubPath);
+        }
+        boolean success = fs.rename(fileStatus.getPath(), finalSubPath);
+        if (success) {
+          LOG.info("Moving staging file[" + fileStatus.getPath() + "] + " +
+              "to final output[" + finalSubPath + "]");
+        } else {
+          LOG.error("Can't move staging file[" + fileStatus.getPath() + "] + " +
+              "to final output[" + finalSubPath + "]");
+        }
+      }
+    }
+  }
+
+  /**
+   * Returns the portion of {@code childPath} below {@code parentPath}, with the
+   * leading separator removed, or {@code null} when {@code childPath} is not
+   * located under {@code parentPath}.
+   *
+   * @param parentPath the base directory path
+   * @param childPath  the path expected to be a descendant of {@code parentPath}
+   * @return the relative sub-path string, or {@code null} if {@code childPath}
+   *         does not start with {@code parentPath}
+   */
+  private String extractSubPath(Path parentPath, Path childPath) {
+    String parentPathStr = parentPath.toUri().getPath();
+    String childPathStr = childPath.toUri().getPath();
+
+    // A parent longer than the child can never be its prefix.
+    if (parentPathStr.length() > childPathStr.length()) {
+      return null;
+    }
+
+    // The parent string must appear at the very beginning of the child path.
+    int index = childPathStr.indexOf(parentPathStr);
+    if (index != 0) {
+      return null;
+    }
+
+    // Skip the separator that follows the parent prefix.
+    // NOTE(review): if the two paths are identical, length() + 1 exceeds the
+    // child length and this throws StringIndexOutOfBoundsException — confirm
+    // callers never pass equal paths.
+    return childPathStr.substring(parentPathStr.length() + 1);
+  }
+
+  /**
+   * Attach the sequence number to a path.
+   *
+   * Replaces the last dash-separated token of the file name with the formatted
+   * sequence number (result files are expected to consist of exactly four
+   * dash-separated tokens, e.g. "part-xx-xxxxxx-nnnnnn" — TODO confirm format).
+   *
+   * @param path Path
+   * @param seq sequence number
+   * @param nf Number format used to render {@code seq}
+   * @return New path attached with sequence number
+   * @throws java.io.IOException if the file name does not consist of exactly
+   *         four dash-separated tokens
+   */
+  private String replaceFileNameSeq(Path path, int seq, NumberFormat nf) throws IOException {
+    String[] tokens = path.getName().split("-");
+    // Enforce the fixed four-token result-file naming scheme.
+    if (tokens.length != 4) {
+      throw new IOException("Wrong result file name:" + path);
+    }
+    return tokens[0] + "-" + tokens[1] + "-" + tokens[2] + "-" + nf.format(seq);
+  }
+
+  /**
+   * Make sure all files are moved.
+   *
+   * Recursively checks that no regular file remains anywhere under
+   * {@code stagingPath}; subdirectories verified empty are deleted on the way.
+   *
+   * @param fs FileSystem
+   * @param stagingPath The staging directory
+   * @return true if no regular file is left under the staging directory,
+   *         false otherwise
+   * @throws java.io.IOException
+   */
+  private boolean verifyAllFileMoved(FileSystem fs, Path stagingPath) throws IOException {
+    FileStatus[] files = fs.listStatus(stagingPath);
+    if (files != null && files.length != 0) {
+      for (FileStatus eachFile: files) {
+        if (eachFile.isFile()) {
+          // Any remaining regular file means the move did not complete.
+          LOG.error("There are some unmoved files in staging dir:" + eachFile.getPath());
+          return false;
+        } else {
+          // Recurse into subdirectories; delete each one once verified empty.
+          if (verifyAllFileMoved(fs, eachFile.getPath())) {
+            fs.delete(eachFile.getPath(), false);
+          } else {
+            return false;
+          }
+        }
+      }
+    }
+
+    return true;
+  }
+
+  /**
+   * Recursively visits a staging directory tree and records, in
+   * {@code renameDirs}, which staging directories should be renamed to their
+   * corresponding final output directories. While walking, each visited
+   * directory is also re-created under {@code oldTableDir} so the previous
+   * table layout can serve as a recovery point.
+   *
+   * @param fs file system in use
+   * @param stagingPath staging directory currently being visited
+   * @param outputPath root of the final output directory
+   * @param stagingParentPathString string form of the staging root, replaced as a path prefix
+   * @param renameDirs collects staging-directory to final-directory rename pairs
+   * @param oldTableDir previous table directory used to build recovery paths
+   * @throws java.io.IOException
+   */
+  private void visitPartitionedDirectory(FileSystem fs, Path stagingPath, Path outputPath,
+                                         String stagingParentPathString,
+                                         Map<Path, Path> renameDirs, Path oldTableDir) throws IOException {
+    FileStatus[] files = fs.listStatus(stagingPath);
+
+    for(FileStatus eachFile : files) {
+      if (eachFile.isDirectory()) {
+        Path oldPath = eachFile.getPath();
+
+        // Make recover directory.
+        // NOTE(review): replaceAll() interprets its first argument as a regex;
+        // paths containing regex metacharacters may be rewritten incorrectly.
+        String recoverPathString = oldPath.toString().replaceAll(stagingParentPathString,
+            oldTableDir.toString());
+        Path recoveryPath = new Path(recoverPathString);
+        if (!fs.exists(recoveryPath)) {
+          fs.mkdirs(recoveryPath);
+        }
+
+        visitPartitionedDirectory(fs, eachFile.getPath(), outputPath, stagingParentPathString,
+            renameDirs, oldTableDir);
+        // Find last order partition for renaming
+        String newPathString = oldPath.toString().replaceAll(stagingParentPathString,
+            outputPath.toString());
+        Path newPath = new Path(newPathString);
+        // Deepest (leaf) partition directories are renamed wholesale; directories
+        // that still contain subdirectories are just created at the destination.
+        // NOTE(review): isLeafDirectory() returns true when the path CONTAINS a
+        // subdirectory (i.e. is NOT a leaf), so the negation below selects leaves.
+        if (!isLeafDirectory(fs, eachFile.getPath())) {
+          renameDirs.put(eachFile.getPath(), newPath);
+        } else {
+          if (!fs.exists(newPath)) {
+            fs.mkdirs(newPath);
+          }
+        }
+      }
+    }
+  }
+
+  /**
+   * Returns true when {@code path} contains at least one subdirectory.
+   *
+   * NOTE(review): despite its name, this returns true for a NON-leaf
+   * directory; the caller negates the result (see visitPartitionedDirectory).
+   *
+   * @param fs file system in use
+   * @param path directory to inspect
+   * @return true if any child of {@code path} is a directory
+   * @throws java.io.IOException
+   */
+  private boolean isLeafDirectory(FileSystem fs, Path path) throws IOException {
+    boolean retValue = false;
+
+    FileStatus[] files = fs.listStatus(path);
+    for (FileStatus file : files) {
+      if (fs.isDirectory(file.getPath())) {
+        retValue = true;
+        break;
+      }
+    }
+
+    return retValue;
   }
 }

http://git-wip-us.apache.org/repos/asf/tajo/blob/5491f0e7/tajo-storage/tajo-storage-hdfs/src/main/java/org/apache/tajo/storage/HashShuffleAppenderManager.java
----------------------------------------------------------------------
diff --git a/tajo-storage/tajo-storage-hdfs/src/main/java/org/apache/tajo/storage/HashShuffleAppenderManager.java b/tajo-storage/tajo-storage-hdfs/src/main/java/org/apache/tajo/storage/HashShuffleAppenderManager.java
index 4635b76..1846ed6 100644
--- a/tajo-storage/tajo-storage-hdfs/src/main/java/org/apache/tajo/storage/HashShuffleAppenderManager.java
+++ b/tajo-storage/tajo-storage-hdfs/src/main/java/org/apache/tajo/storage/HashShuffleAppenderManager.java
@@ -30,11 +30,9 @@ import org.apache.tajo.catalog.Schema;
 import org.apache.tajo.catalog.TableMeta;
 import org.apache.tajo.conf.TajoConf;
 import org.apache.tajo.conf.TajoConf.ConfVars;
-import org.apache.tajo.util.Pair;
 
 import java.io.IOException;
 import java.util.ArrayList;
-import java.util.Collection;
 import java.util.List;
 import java.util.Map;
 import java.util.concurrent.ConcurrentHashMap;
@@ -84,7 +82,7 @@ public class HashShuffleAppenderManager {
         if (!fs.exists(dataFile.getParent())) {
           fs.mkdirs(dataFile.getParent());
         }
-        FileAppender appender = (FileAppender)((FileStorageManager)StorageManager.getFileStorageManager(tajoConf))
+        FileAppender appender = (FileAppender)((FileStorageManager) TableSpaceManager.getFileStorageManager(tajoConf))
             .getAppender(meta, outSchema, dataFile);
         appender.enableStats();
         appender.init();

http://git-wip-us.apache.org/repos/asf/tajo/blob/5491f0e7/tajo-storage/tajo-storage-hdfs/src/test/java/org/apache/tajo/storage/TestCompressionStorages.java
----------------------------------------------------------------------
diff --git a/tajo-storage/tajo-storage-hdfs/src/test/java/org/apache/tajo/storage/TestCompressionStorages.java b/tajo-storage/tajo-storage-hdfs/src/test/java/org/apache/tajo/storage/TestCompressionStorages.java
index 68a2cf2..779f908 100644
--- a/tajo-storage/tajo-storage-hdfs/src/test/java/org/apache/tajo/storage/TestCompressionStorages.java
+++ b/tajo-storage/tajo-storage-hdfs/src/test/java/org/apache/tajo/storage/TestCompressionStorages.java
@@ -30,7 +30,6 @@ import org.apache.hadoop.util.NativeCodeLoader;
 import org.apache.tajo.catalog.CatalogUtil;
 import org.apache.tajo.catalog.Schema;
 import org.apache.tajo.catalog.TableMeta;
-import org.apache.tajo.catalog.proto.CatalogProtos.StoreType;
 import org.apache.tajo.catalog.statistics.TableStats;
 import org.apache.tajo.common.TajoDataTypes.Type;
 import org.apache.tajo.conf.TajoConf;
@@ -123,7 +122,7 @@ public class TestCompressionStorages {
 
     String fileName = "Compression_" + codec.getSimpleName();
     Path tablePath = new Path(testDir, fileName);
-    Appender appender = ((FileStorageManager)StorageManager.getFileStorageManager(conf)).getAppender(meta, schema, tablePath);
+    Appender appender = ((FileStorageManager) TableSpaceManager.getFileStorageManager(conf)).getAppender(meta, schema, tablePath);
     appender.enableStats();
 
     appender.init();
@@ -155,7 +154,7 @@ public class TestCompressionStorages {
     FileFragment[] tablets = new FileFragment[1];
     tablets[0] = new FileFragment(fileName, tablePath, 0, fileLen);
 
-    Scanner scanner = StorageManager.getFileStorageManager(conf).getScanner(meta, schema, tablets[0], schema);
+    Scanner scanner = TableSpaceManager.getFileStorageManager(conf).getScanner(meta, schema, tablets[0], schema);
 
     if (storeType.equalsIgnoreCase("CSV")) {
       if (SplittableCompressionCodec.class.isAssignableFrom(codec)) {

http://git-wip-us.apache.org/repos/asf/tajo/blob/5491f0e7/tajo-storage/tajo-storage-hdfs/src/test/java/org/apache/tajo/storage/TestDelimitedTextFile.java
----------------------------------------------------------------------
diff --git a/tajo-storage/tajo-storage-hdfs/src/test/java/org/apache/tajo/storage/TestDelimitedTextFile.java b/tajo-storage/tajo-storage-hdfs/src/test/java/org/apache/tajo/storage/TestDelimitedTextFile.java
index 6e15c51..2260d2a 100644
--- a/tajo-storage/tajo-storage-hdfs/src/test/java/org/apache/tajo/storage/TestDelimitedTextFile.java
+++ b/tajo-storage/tajo-storage-hdfs/src/test/java/org/apache/tajo/storage/TestDelimitedTextFile.java
@@ -26,7 +26,6 @@ import org.apache.hadoop.fs.Path;
 import org.apache.tajo.catalog.CatalogUtil;
 import org.apache.tajo.catalog.Schema;
 import org.apache.tajo.catalog.TableMeta;
-import org.apache.tajo.catalog.proto.CatalogProtos;
 import org.apache.tajo.common.TajoDataTypes.Type;
 import org.apache.tajo.conf.TajoConf;
 import org.apache.tajo.datum.Datum;
@@ -104,7 +103,7 @@ public class TestDelimitedTextFile {
     TableMeta meta = CatalogUtil.newTableMeta("JSON");
     meta.putOption(StorageUtil.TEXT_ERROR_TOLERANCE_MAXNUM, "-1");
     FileFragment fragment =  getFileFragment("testErrorTolerance1.json");
-    Scanner scanner =  StorageManager.getFileStorageManager(conf).getScanner(meta, schema, fragment);
+    Scanner scanner =  TableSpaceManager.getFileStorageManager(conf).getScanner(meta, schema, fragment);
     scanner.init();
 
     Tuple tuple;
@@ -126,7 +125,7 @@ public class TestDelimitedTextFile {
     TableMeta meta = CatalogUtil.newTableMeta("JSON");
     meta.putOption(StorageUtil.TEXT_ERROR_TOLERANCE_MAXNUM, "1");
     FileFragment fragment =  getFileFragment("testErrorTolerance1.json");
-    Scanner scanner =  StorageManager.getFileStorageManager(conf).getScanner(meta, schema, fragment);
+    Scanner scanner =  TableSpaceManager.getFileStorageManager(conf).getScanner(meta, schema, fragment);
     scanner.init();
 
     assertNotNull(scanner.next());
@@ -148,7 +147,7 @@ public class TestDelimitedTextFile {
     TableMeta meta = CatalogUtil.newTableMeta("JSON");
     meta.putOption(StorageUtil.TEXT_ERROR_TOLERANCE_MAXNUM, "0");
     FileFragment fragment =  getFileFragment("testErrorTolerance2.json");
-    Scanner scanner =  StorageManager.getFileStorageManager(conf).getScanner(meta, schema, fragment);
+    Scanner scanner =  TableSpaceManager.getFileStorageManager(conf).getScanner(meta, schema, fragment);
     scanner.init();
 
     try {
@@ -167,7 +166,7 @@ public class TestDelimitedTextFile {
     TableMeta meta = CatalogUtil.newTableMeta("JSON");
     meta.putOption(StorageUtil.TEXT_ERROR_TOLERANCE_MAXNUM, "1");
     FileFragment fragment = getFileFragment("testErrorTolerance3.json");
-    Scanner scanner = StorageManager.getFileStorageManager(conf).getScanner(meta, schema, fragment);
+    Scanner scanner = TableSpaceManager.getFileStorageManager(conf).getScanner(meta, schema, fragment);
     scanner.init();
 
     try {

http://git-wip-us.apache.org/repos/asf/tajo/blob/5491f0e7/tajo-storage/tajo-storage-hdfs/src/test/java/org/apache/tajo/storage/TestFileStorageManager.java
----------------------------------------------------------------------
diff --git a/tajo-storage/tajo-storage-hdfs/src/test/java/org/apache/tajo/storage/TestFileStorageManager.java b/tajo-storage/tajo-storage-hdfs/src/test/java/org/apache/tajo/storage/TestFileStorageManager.java
index 7d5eee1..41c6c67 100644
--- a/tajo-storage/tajo-storage-hdfs/src/test/java/org/apache/tajo/storage/TestFileStorageManager.java
+++ b/tajo-storage/tajo-storage-hdfs/src/test/java/org/apache/tajo/storage/TestFileStorageManager.java
@@ -26,7 +26,6 @@ import org.apache.hadoop.hdfs.*;
 import org.apache.tajo.catalog.CatalogUtil;
 import org.apache.tajo.catalog.Schema;
 import org.apache.tajo.catalog.TableMeta;
-import org.apache.tajo.catalog.proto.CatalogProtos.StoreType;
 import org.apache.tajo.common.TajoDataTypes.Type;
 import org.apache.tajo.conf.TajoConf;
 import org.apache.tajo.datum.Datum;
@@ -81,7 +80,7 @@ public class TestFileStorageManager {
 
     Path path = StorageUtil.concatPath(testDir, "testGetScannerAndAppender", "table.csv");
     fs.mkdirs(path.getParent());
-    FileStorageManager fileStorageManager = (FileStorageManager)StorageManager.getFileStorageManager(conf);
+    FileStorageManager fileStorageManager = (FileStorageManager) TableSpaceManager.getFileStorageManager(conf);
     assertEquals(fs.getUri(), fileStorageManager.getFileSystem().getUri());
 
 		Appender appender = fileStorageManager.getAppender(meta, schema, path);
@@ -128,7 +127,7 @@ public class TestFileStorageManager {
       }
 
       assertTrue(fs.exists(tablePath));
-      FileStorageManager sm = (FileStorageManager)StorageManager.getFileStorageManager(tajoConf);
+      FileStorageManager sm = (FileStorageManager) TableSpaceManager.getFileStorageManager(tajoConf);
       assertEquals(fs.getUri(), sm.getFileSystem().getUri());
 
       Schema schema = new Schema();
@@ -182,7 +181,7 @@ public class TestFileStorageManager {
         DFSTestUtil.createFile(fs, tmpFile, 10, (short) 2, 0xDEADDEADl);
       }
       assertTrue(fs.exists(tablePath));
-      FileStorageManager sm = (FileStorageManager)StorageManager.getFileStorageManager(tajoConf);
+      FileStorageManager sm = (FileStorageManager) TableSpaceManager.getFileStorageManager(tajoConf);
       assertEquals(fs.getUri(), sm.getFileSystem().getUri());
 
       Schema schema = new Schema();
@@ -221,11 +220,11 @@ public class TestFileStorageManager {
 
     try {
       /* Local FileSystem */
-      FileStorageManager sm = (FileStorageManager)StorageManager.getStorageManager(conf, "CSV");
+      FileStorageManager sm = (FileStorageManager) TableSpaceManager.getStorageManager(conf, "CSV");
       assertEquals(fs.getUri(), sm.getFileSystem().getUri());
 
       /* Distributed FileSystem */
-      sm = (FileStorageManager)StorageManager.getStorageManager(tajoConf, "CSV");
+      sm = (FileStorageManager) TableSpaceManager.getStorageManager(tajoConf, "CSV");
       assertNotEquals(fs.getUri(), sm.getFileSystem().getUri());
       assertEquals(cluster.getFileSystem().getUri(), sm.getFileSystem().getUri());
     } finally {

http://git-wip-us.apache.org/repos/asf/tajo/blob/5491f0e7/tajo-storage/tajo-storage-hdfs/src/test/java/org/apache/tajo/storage/TestFileSystems.java
----------------------------------------------------------------------
diff --git a/tajo-storage/tajo-storage-hdfs/src/test/java/org/apache/tajo/storage/TestFileSystems.java b/tajo-storage/tajo-storage-hdfs/src/test/java/org/apache/tajo/storage/TestFileSystems.java
index b4a60fc..1222fae 100644
--- a/tajo-storage/tajo-storage-hdfs/src/test/java/org/apache/tajo/storage/TestFileSystems.java
+++ b/tajo-storage/tajo-storage-hdfs/src/test/java/org/apache/tajo/storage/TestFileSystems.java
@@ -25,7 +25,6 @@ import org.apache.hadoop.fs.Path;
 import org.apache.tajo.catalog.CatalogUtil;
 import org.apache.tajo.catalog.Schema;
 import org.apache.tajo.catalog.TableMeta;
-import org.apache.tajo.catalog.proto.CatalogProtos.StoreType;
 import org.apache.tajo.common.TajoDataTypes.Type;
 import org.apache.tajo.conf.TajoConf;
 import org.apache.tajo.datum.Datum;
@@ -58,7 +57,7 @@ public class TestFileSystems {
   public TestFileSystems(FileSystem fs) throws IOException {
     this.fs = fs;
     this.conf = new TajoConf(fs.getConf());
-    sm = (FileStorageManager)StorageManager.getFileStorageManager(conf);
+    sm = (FileStorageManager) TableSpaceManager.getFileStorageManager(conf);
     testDir = getTestDir(this.fs, TEST_PATH);
   }
 

http://git-wip-us.apache.org/repos/asf/tajo/blob/5491f0e7/tajo-storage/tajo-storage-hdfs/src/test/java/org/apache/tajo/storage/TestLineReader.java
----------------------------------------------------------------------
diff --git a/tajo-storage/tajo-storage-hdfs/src/test/java/org/apache/tajo/storage/TestLineReader.java b/tajo-storage/tajo-storage-hdfs/src/test/java/org/apache/tajo/storage/TestLineReader.java
index 1078b84..266f906 100644
--- a/tajo-storage/tajo-storage-hdfs/src/test/java/org/apache/tajo/storage/TestLineReader.java
+++ b/tajo-storage/tajo-storage-hdfs/src/test/java/org/apache/tajo/storage/TestLineReader.java
@@ -28,7 +28,6 @@ import org.apache.hadoop.io.compress.DeflateCodec;
 import org.apache.tajo.catalog.CatalogUtil;
 import org.apache.tajo.catalog.Schema;
 import org.apache.tajo.catalog.TableMeta;
-import org.apache.tajo.catalog.proto.CatalogProtos.StoreType;
 import org.apache.tajo.common.TajoDataTypes.Type;
 import org.apache.tajo.conf.TajoConf;
 import org.apache.tajo.datum.DatumFactory;
@@ -66,7 +65,7 @@ public class TestLineReader {
 
     TableMeta meta = CatalogUtil.newTableMeta("TEXT");
     Path tablePath = new Path(testDir, "line.data");
-    FileAppender appender = (FileAppender) StorageManager.getFileStorageManager(conf).getAppender(
+    FileAppender appender = (FileAppender) TableSpaceManager.getFileStorageManager(conf).getAppender(
         null, null, meta, schema, tablePath);
     appender.enableStats();
     appender.init();
@@ -119,7 +118,7 @@ public class TestLineReader {
     meta.putOption("compression.codec", DeflateCodec.class.getCanonicalName());
 
     Path tablePath = new Path(testDir, "testLineDelimitedReaderWithCompression." + DeflateCodec.class.getSimpleName());
-    FileAppender appender = (FileAppender) StorageManager.getFileStorageManager(conf).getAppender(
+    FileAppender appender = (FileAppender) TableSpaceManager.getFileStorageManager(conf).getAppender(
         null, null, meta, schema, tablePath);
     appender.enableStats();
     appender.init();
@@ -177,7 +176,7 @@ public class TestLineReader {
     TableMeta meta = CatalogUtil.newTableMeta("TEXT");
 
     Path tablePath = new Path(testDir, "testLineDelimitedReader");
-    FileAppender appender = (FileAppender) StorageManager.getFileStorageManager(conf).getAppender(
+    FileAppender appender = (FileAppender) TableSpaceManager.getFileStorageManager(conf).getAppender(
         null, null, meta, schema, tablePath);
     appender.enableStats();
     appender.init();
@@ -280,7 +279,7 @@ public class TestLineReader {
 
     TableMeta meta = CatalogUtil.newTableMeta("TEXT");
     Path tablePath = new Path(testDir, "testSeekableByteBufLineReader.data");
-    FileAppender appender = (FileAppender) StorageManager.getFileStorageManager(conf).getAppender(
+    FileAppender appender = (FileAppender) TableSpaceManager.getFileStorageManager(conf).getAppender(
         null, null, meta, schema, tablePath);
     appender.enableStats();
     appender.init();

http://git-wip-us.apache.org/repos/asf/tajo/blob/5491f0e7/tajo-storage/tajo-storage-hdfs/src/test/java/org/apache/tajo/storage/TestMergeScanner.java
----------------------------------------------------------------------
diff --git a/tajo-storage/tajo-storage-hdfs/src/test/java/org/apache/tajo/storage/TestMergeScanner.java b/tajo-storage/tajo-storage-hdfs/src/test/java/org/apache/tajo/storage/TestMergeScanner.java
index 2c856e1..82acaf3 100644
--- a/tajo-storage/tajo-storage-hdfs/src/test/java/org/apache/tajo/storage/TestMergeScanner.java
+++ b/tajo-storage/tajo-storage-hdfs/src/test/java/org/apache/tajo/storage/TestMergeScanner.java
@@ -24,7 +24,6 @@ import org.apache.hadoop.fs.Path;
 import org.apache.tajo.catalog.CatalogUtil;
 import org.apache.tajo.catalog.Schema;
 import org.apache.tajo.catalog.TableMeta;
-import org.apache.tajo.catalog.proto.CatalogProtos.StoreType;
 import org.apache.tajo.catalog.statistics.TableStats;
 import org.apache.tajo.common.TajoDataTypes.Type;
 import org.apache.tajo.conf.TajoConf;
@@ -95,7 +94,7 @@ public class TestMergeScanner {
     conf.setStrings("tajo.storage.projectable-scanner", "rcfile", "parquet", "avro");
     testDir = CommonTestingUtil.getTestDir(TEST_PATH);
     fs = testDir.getFileSystem(conf);
-    sm = StorageManager.getFileStorageManager(conf);
+    sm = TableSpaceManager.getFileStorageManager(conf);
   }
 
   @Test
@@ -115,7 +114,7 @@ public class TestMergeScanner {
     }
 
     Path table1Path = new Path(testDir, storeType + "_1.data");
-    Appender appender1 = StorageManager.getFileStorageManager(conf).getAppender(null, null, meta, schema, table1Path);
+    Appender appender1 = TableSpaceManager.getFileStorageManager(conf).getAppender(null, null, meta, schema, table1Path);
     appender1.enableStats();
     appender1.init();
     int tupleNum = 10000;
@@ -137,7 +136,7 @@ public class TestMergeScanner {
     }
 
     Path table2Path = new Path(testDir, storeType + "_2.data");
-    Appender appender2 = StorageManager.getFileStorageManager(conf).getAppender(null, null, meta, schema, table2Path);
+    Appender appender2 = TableSpaceManager.getFileStorageManager(conf).getAppender(null, null, meta, schema, table2Path);
     appender2.enableStats();
     appender2.init();
 


[06/10] tajo git commit: TAJO-1542 Refactoring of HashJoinExecs. (contributed by navis, committed by hyunsik)

Posted by ji...@apache.org.
TAJO-1542 Refactoring of HashJoinExecs. (contributed by navis, committed by hyunsik)

Closes #529 #567


Project: http://git-wip-us.apache.org/repos/asf/tajo/repo
Commit: http://git-wip-us.apache.org/repos/asf/tajo/commit/36a703c5
Tree: http://git-wip-us.apache.org/repos/asf/tajo/tree/36a703c5
Diff: http://git-wip-us.apache.org/repos/asf/tajo/diff/36a703c5

Branch: refs/heads/index_support
Commit: 36a703c5dc2c2257dfd52232f204507fb4b79024
Parents: f3acbdf
Author: Hyunsik Choi <hy...@apache.org>
Authored: Thu May 14 20:59:09 2015 -0700
Committer: Hyunsik Choi <hy...@apache.org>
Committed: Thu May 14 20:59:09 2015 -0700

----------------------------------------------------------------------
 CHANGES                                         |   3 +
 .../java/org/apache/tajo/catalog/Schema.java    |  16 +
 .../org/apache/tajo/storage/EmptyTuple.java     | 140 +-----
 .../java/org/apache/tajo/storage/NullTuple.java | 175 +++++++
 .../java/org/apache/tajo/storage/VTuple.java    |  20 +-
 .../engine/planner/PhysicalPlannerImpl.java     |  22 +-
 .../physical/BasicPhysicalExecutorVisitor.java  |   8 -
 .../planner/physical/CommonHashJoinExec.java    | 191 ++++++++
 .../engine/planner/physical/CommonJoinExec.java | 172 ++++++-
 .../planner/physical/HashFullOuterJoinExec.java | 247 ++++------
 .../engine/planner/physical/HashJoinExec.java   | 212 +--------
 .../planner/physical/HashLeftAntiJoinExec.java  |  59 +--
 .../planner/physical/HashLeftOuterJoinExec.java | 292 +-----------
 .../planner/physical/HashLeftSemiJoinExec.java  |  48 +-
 .../planner/physical/NLLeftOuterJoinExec.java   | 101 ----
 .../physical/PhysicalExecutorVisitor.java       |   3 -
 .../physical/RightOuterMergeJoinExec.java       |  40 +-
 .../apache/tajo/engine/utils/CacheHolder.java   |   3 +-
 .../planner/physical/TestHashSemiJoinExec.java  |   8 +-
 .../physical/TestLeftOuterHashJoinExec.java     | 104 ++--
 .../physical/TestLeftOuterNLJoinExec.java       | 474 -------------------
 .../testJoinFilterOfRowPreservedTable1.sql      |   2 +-
 .../testJoinFilterOfRowPreservedTable1.result   |   2 +-
 .../plan/expr/AggregationFunctionCallEval.java  |   4 +-
 .../apache/tajo/plan/expr/AlgebraicUtil.java    |   5 +
 .../org/apache/tajo/plan/expr/EvalNode.java     |  39 +-
 .../java/org/apache/tajo/plan/expr/InEval.java  |   2 +-
 .../plan/expr/PatternMatchPredicateEval.java    |   2 +-
 .../tajo/plan/expr/WindowFunctionEval.java      |   2 +-
 .../org/apache/tajo/storage/FrameTuple.java     |  14 +-
 30 files changed, 842 insertions(+), 1568 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/tajo/blob/36a703c5/CHANGES
----------------------------------------------------------------------
diff --git a/CHANGES b/CHANGES
index 77be589..44ae4b4 100644
--- a/CHANGES
+++ b/CHANGES
@@ -24,6 +24,9 @@ Release 0.11.0 - unreleased
 
   IMPROVEMENT
 
+    TAJO-1542: Refactoring of HashJoinExecs. (Contributed Navis, Committed by 
+    hyunsik)
+
     TAJO-1591: Change StoreType represented as Enum to String type. (hyunsik)
 
     TAJO-1452: Improve function listing order (Contributed Dongjoon Hyun, 

http://git-wip-us.apache.org/repos/asf/tajo/blob/36a703c5/tajo-catalog/tajo-catalog-common/src/main/java/org/apache/tajo/catalog/Schema.java
----------------------------------------------------------------------
diff --git a/tajo-catalog/tajo-catalog-common/src/main/java/org/apache/tajo/catalog/Schema.java b/tajo-catalog/tajo-catalog-common/src/main/java/org/apache/tajo/catalog/Schema.java
index 0e4b741..80c4d83 100644
--- a/tajo-catalog/tajo-catalog-common/src/main/java/org/apache/tajo/catalog/Schema.java
+++ b/tajo-catalog/tajo-catalog-common/src/main/java/org/apache/tajo/catalog/Schema.java
@@ -400,6 +400,22 @@ public class Schema implements ProtoObject<SchemaProto>, Cloneable, GsonObject {
     return containFlag;
   }
 
+  /**
+   * Return TRUE if any column in <code>columns</code> is included in this schema.
+   *
+   * @param columns Columns to be checked
+   * @return true if any column in <code>columns</code> is included in this schema.
+   *         Otherwise, false.
+   */
+  public boolean containsAny(Collection<Column> columns) {
+    for (Column column : columns) {
+      if (contains(column)) {
+        return true;
+      }
+    }
+    return false;
+  }
+
   public synchronized Schema addColumn(String name, TypeDesc typeDesc) {
     String normalized = name;
     if(fieldsByQualifiedName.containsKey(normalized)) {

http://git-wip-us.apache.org/repos/asf/tajo/blob/36a703c5/tajo-common/src/main/java/org/apache/tajo/storage/EmptyTuple.java
----------------------------------------------------------------------
diff --git a/tajo-common/src/main/java/org/apache/tajo/storage/EmptyTuple.java b/tajo-common/src/main/java/org/apache/tajo/storage/EmptyTuple.java
index 89e72ed..cdcebd7 100644
--- a/tajo-common/src/main/java/org/apache/tajo/storage/EmptyTuple.java
+++ b/tajo-common/src/main/java/org/apache/tajo/storage/EmptyTuple.java
@@ -18,17 +18,12 @@
 
 package org.apache.tajo.storage;
 
-import org.apache.tajo.datum.Datum;
-import org.apache.tajo.datum.NullDatum;
-import org.apache.tajo.datum.ProtobufDatum;
-
 /* This class doesn’t have content datum. if selected column is zero, this is useful
 *  e.g. select count(*) from table
 * */
-public class EmptyTuple implements Tuple, Cloneable {
+public class EmptyTuple extends NullTuple {
 
   private static EmptyTuple tuple;
-  private static Datum[] EMPTY_VALUES = new Datum[0];
 
   static {
     tuple = new EmptyTuple();
@@ -39,138 +34,11 @@ public class EmptyTuple implements Tuple, Cloneable {
   }
 
   private EmptyTuple() {
+    super(0);
   }
 
   @Override
-  public int size() {
-    return 0;
-  }
-
-  public boolean contains(int fieldId) {
-    return false;
-  }
-
-  @Override
-  public boolean isNull(int fieldid) {
-    return true;
-  }
-
-  @Override
-  public boolean isNotNull(int fieldid) {
-    return false;
-  }
-
-  @Override
-  public void clear() {
-  }
-
-  @Override
-  public void put(int fieldId, Datum value) {
-    throw new UnsupportedOperationException();
-  }
-
-  @Override
-  public void put(int fieldId, Datum[] values) {
-    throw new UnsupportedOperationException();
-  }
-
-  @Override
-  public void put(int fieldId, Tuple tuple) {
-    throw new UnsupportedOperationException();
-  }
-
-  @Override
-  public void put(Datum[] values) {
-    throw new UnsupportedOperationException();
-  }
-
-  @Override
-  public Datum get(int fieldId) {
-    return NullDatum.get();
-  }
-
-  @Override
-  public void setOffset(long offset) {
-
-  }
-
-  @Override
-  public long getOffset() {
-    return -1;
-  }
-
-  @Override
-  public boolean getBool(int fieldId) {
-    return NullDatum.get().asBool();
-  }
-
-  @Override
-  public byte getByte(int fieldId) {
-    return NullDatum.get().asByte();
-  }
-
-  @Override
-  public char getChar(int fieldId) {
-    return NullDatum.get().asChar();
-  }
-
-  @Override
-  public byte[] getBytes(int fieldId) {
-    return NullDatum.get().asByteArray();
-  }
-
-  @Override
-  public short getInt2(int fieldId) {
-    return NullDatum.get().asInt2();
-  }
-
-  @Override
-  public int getInt4(int fieldId) {
-    return NullDatum.get().asInt4();
-  }
-
-  @Override
-  public long getInt8(int fieldId) {
-    return NullDatum.get().asInt8();
-  }
-
-  @Override
-  public float getFloat4(int fieldId) {
-    return NullDatum.get().asFloat4();
-  }
-
-  @Override
-  public double getFloat8(int fieldId) {
-    return NullDatum.get().asFloat8();
-  }
-
-  @Override
-  public String getText(int fieldId) {
-    return NullDatum.get().asChars();
-  }
-
-  @Override
-  public ProtobufDatum getProtobufDatum(int fieldId) {
-    throw new UnsupportedOperationException();
-  }
-
-  @Override
-  public Datum getInterval(int fieldId) {
-    return NullDatum.get();
-  }
-
-  @Override
-  public char[] getUnicodeChars(int fieldId) {
-    return NullDatum.get().asUnicodeChars();
-  }
-
-  @Override
-  public Tuple clone() throws CloneNotSupportedException {
-    throw new CloneNotSupportedException();
-  }
-
-  @Override
-  public Datum[] getValues() {
-    return EMPTY_VALUES;
+  public Tuple clone() {
+    return this;
   }
 }

http://git-wip-us.apache.org/repos/asf/tajo/blob/36a703c5/tajo-common/src/main/java/org/apache/tajo/storage/NullTuple.java
----------------------------------------------------------------------
diff --git a/tajo-common/src/main/java/org/apache/tajo/storage/NullTuple.java b/tajo-common/src/main/java/org/apache/tajo/storage/NullTuple.java
new file mode 100644
index 0000000..45eb859
--- /dev/null
+++ b/tajo-common/src/main/java/org/apache/tajo/storage/NullTuple.java
@@ -0,0 +1,175 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.tajo.storage;
+
+import org.apache.tajo.datum.Datum;
+import org.apache.tajo.datum.NullDatum;
+import org.apache.tajo.datum.ProtobufDatum;
+
+import java.util.Arrays;
+
+/**
+ * A tuple which contains all null datums. It is used for outer joins.
+ *
+ * Instances are immutable: every mutator throws
+ * {@link UnsupportedOperationException}, and every accessor reports the
+ * corresponding null value.
+ */
+public class NullTuple implements Tuple, Cloneable {
+
+  public static Tuple create(int size) {
+    return new NullTuple(size);
+  }
+
+  // Number of (all-null) fields this tuple reports via size().
+  private final int size;
+
+  NullTuple(int size) {
+    this.size = size;
+  }
+
+  @Override
+  public int size() {
+    return size;
+  }
+
+  // A field "exists" whenever its id is within range; its value is always null.
+  public boolean contains(int fieldId) {
+    return fieldId < size;
+  }
+
+  @Override
+  public boolean isNull(int fieldid) {
+    return true;
+  }
+
+  @Override
+  public boolean isNotNull(int fieldid) {
+    return false;
+  }
+
+  @Override
+  public void clear() {
+  }
+
+  // All mutators are unsupported: a NullTuple is immutable by construction.
+  @Override
+  public void put(int fieldId, Datum value) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public void put(int fieldId, Datum[] values) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public void put(int fieldId, Tuple tuple) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public void put(Datum[] values) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public Datum get(int fieldId) {
+    return NullDatum.get();
+  }
+
+  @Override
+  public void setOffset(long offset) {
+  }
+
+  @Override
+  public long getOffset() {
+    return 0;
+  }
+
+  // Primitive accessors delegate to NullDatum's null representations.
+  @Override
+  public boolean getBool(int fieldId) {
+    return NullDatum.get().asBool();
+  }
+
+  @Override
+  public byte getByte(int fieldId) {
+    return NullDatum.get().asByte();
+  }
+
+  @Override
+  public char getChar(int fieldId) {
+    return NullDatum.get().asChar();
+  }
+
+  @Override
+  public byte[] getBytes(int fieldId) {
+    return NullDatum.get().asByteArray();
+  }
+
+  @Override
+  public short getInt2(int fieldId) {
+    return NullDatum.get().asInt2();
+  }
+
+  @Override
+  public int getInt4(int fieldId) {
+    return NullDatum.get().asInt4();
+  }
+
+  @Override
+  public long getInt8(int fieldId) {
+    return NullDatum.get().asInt8();
+  }
+
+  @Override
+  public float getFloat4(int fieldId) {
+    return NullDatum.get().asFloat4();
+  }
+
+  @Override
+  public double getFloat8(int fieldId) {
+    return NullDatum.get().asFloat8();
+  }
+
+  @Override
+  public String getText(int fieldId) {
+    return NullDatum.get().asChars();
+  }
+
+  @Override
+  public ProtobufDatum getProtobufDatum(int fieldId) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public Datum getInterval(int fieldId) {
+    return NullDatum.get();
+  }
+
+  @Override
+  public char[] getUnicodeChars(int fieldId) {
+    return NullDatum.get().asUnicodeChars();
+  }
+
+  // A same-sized instance is an equivalent copy, since all values are null.
+  @Override
+  public Tuple clone() throws CloneNotSupportedException {
+    return new NullTuple(size);
+  }
+
+  // Allocates a fresh array on every call; callers may mutate it freely.
+  @Override
+  public Datum[] getValues() {
+    Datum[] datum = new Datum[size];
+    Arrays.fill(datum, NullDatum.get());
+    return datum;
+  }
+}

http://git-wip-us.apache.org/repos/asf/tajo/blob/36a703c5/tajo-common/src/main/java/org/apache/tajo/storage/VTuple.java
----------------------------------------------------------------------
diff --git a/tajo-common/src/main/java/org/apache/tajo/storage/VTuple.java b/tajo-common/src/main/java/org/apache/tajo/storage/VTuple.java
index 5e839b7..da69eb0 100644
--- a/tajo-common/src/main/java/org/apache/tajo/storage/VTuple.java
+++ b/tajo-common/src/main/java/org/apache/tajo/storage/VTuple.java
@@ -201,6 +201,7 @@ public class VTuple implements Tuple, Cloneable {
     return tuple;
   }
 
+  @Override
   public String toString() {
 		return toDisplayString(getValues());
 	}
@@ -225,22 +226,15 @@ public class VTuple implements Tuple, Cloneable {
   }
 
   public static String toDisplayString(Datum [] values) {
-    boolean first = true;
     StringBuilder str = new StringBuilder();
-    str.append("(");
-    for(int i=0; i < values.length; i++) {
-      if(values[i] != null) {
-        if(first) {
-          first = false;
-        } else {
-          str.append(", ");
-        }
-        str.append(i)
-            .append("=>")
-            .append(values[i]);
+    str.append('(');
+    for (Datum datum : values) {
+      if (str.length() > 1) {
+        str.append(',');
       }
+      str.append(datum);
     }
-    str.append(")");
+    str.append(')');
     return str.toString();
   }
 }

http://git-wip-us.apache.org/repos/asf/tajo/blob/36a703c5/tajo-core/src/main/java/org/apache/tajo/engine/planner/PhysicalPlannerImpl.java
----------------------------------------------------------------------
diff --git a/tajo-core/src/main/java/org/apache/tajo/engine/planner/PhysicalPlannerImpl.java b/tajo-core/src/main/java/org/apache/tajo/engine/planner/PhysicalPlannerImpl.java
index 506b03e..978dde8 100644
--- a/tajo-core/src/main/java/org/apache/tajo/engine/planner/PhysicalPlannerImpl.java
+++ b/tajo-core/src/main/java/org/apache/tajo/engine/planner/PhysicalPlannerImpl.java
@@ -466,14 +466,14 @@ public class PhysicalPlannerImpl implements PhysicalPlanner {
         case IN_MEMORY_HASH_JOIN:
           LOG.info("Left Outer Join (" + plan.getPID() +") chooses [Hash Join].");
           return new HashLeftOuterJoinExec(context, plan, leftExec, rightExec);
-        case NESTED_LOOP_JOIN:
-          //the right operand is too large, so we opt for NL implementation of left outer join
-          LOG.info("Left Outer Join (" + plan.getPID() +") chooses [Nested Loop Join].");
-          return new NLLeftOuterJoinExec(context, plan, leftExec, rightExec);
+        case MERGE_JOIN:
+          //the right operand is too large, so we opt for merge join implementation
+          LOG.info("Left Outer Join (" + plan.getPID() +") chooses [Merge Join].");
+          return createRightOuterMergeJoinPlan(context, plan, rightExec, leftExec);
         default:
           LOG.error("Invalid Left Outer Join Algorithm Enforcer: " + algorithm.name());
-          LOG.error("Choose a fallback inner join algorithm: " + JoinAlgorithm.IN_MEMORY_HASH_JOIN.name());
-          return new HashLeftOuterJoinExec(context, plan, leftExec, rightExec);
+          LOG.error("Choose a fallback to join algorithm: " + JoinAlgorithm.MERGE_JOIN);
+          return createRightOuterMergeJoinPlan(context, plan, rightExec, leftExec);
       }
     } else {
       return createBestLeftOuterJoinPlan(context, plan, leftExec, rightExec);
@@ -500,9 +500,9 @@ public class PhysicalPlannerImpl implements PhysicalPlanner {
       return new HashLeftOuterJoinExec(context, plan, leftExec, rightExec);
     }
     else {
-      //the right operand is too large, so we opt for NL implementation of left outer join
-      LOG.info("Left Outer Join (" + plan.getPID() +") chooses [Nested Loop Join].");
-      return new NLLeftOuterJoinExec(context, plan, leftExec, rightExec);
+      //the right operand is too large, so we opt for merge join implementation
+      LOG.info("Left Outer Join (" + plan.getPID() +") chooses [Merge Join].");
+      return createRightOuterMergeJoinPlan(context, plan, rightExec, leftExec);
     }
   }
 
@@ -566,7 +566,7 @@ public class PhysicalPlannerImpl implements PhysicalPlanner {
           return createRightOuterMergeJoinPlan(context, plan, leftExec, rightExec);
         default:
           LOG.error("Invalid Right Outer Join Algorithm Enforcer: " + algorithm.name());
-          LOG.error("Choose a fallback merge join algorithm: " + JoinAlgorithm.MERGE_JOIN.name());
+          LOG.error("Choose a fallback to join algorithm: " + JoinAlgorithm.MERGE_JOIN);
           return createRightOuterMergeJoinPlan(context, plan, leftExec, rightExec);
       }
     } else {
@@ -589,7 +589,7 @@ public class PhysicalPlannerImpl implements PhysicalPlanner {
 
         default:
           LOG.error("Invalid Full Outer Join Algorithm Enforcer: " + algorithm.name());
-          LOG.error("Choose a fallback merge join algorithm: " + JoinAlgorithm.MERGE_JOIN.name());
+          LOG.error("Choose a fallback to join algorithm: " + JoinAlgorithm.MERGE_JOIN);
           return createFullOuterMergeJoinPlan(context, plan, leftExec, rightExec);
       }
     } else {

http://git-wip-us.apache.org/repos/asf/tajo/blob/36a703c5/tajo-core/src/main/java/org/apache/tajo/engine/planner/physical/BasicPhysicalExecutorVisitor.java
----------------------------------------------------------------------
diff --git a/tajo-core/src/main/java/org/apache/tajo/engine/planner/physical/BasicPhysicalExecutorVisitor.java b/tajo-core/src/main/java/org/apache/tajo/engine/planner/physical/BasicPhysicalExecutorVisitor.java
index 42611b0..c2d93bb 100644
--- a/tajo-core/src/main/java/org/apache/tajo/engine/planner/physical/BasicPhysicalExecutorVisitor.java
+++ b/tajo-core/src/main/java/org/apache/tajo/engine/planner/physical/BasicPhysicalExecutorVisitor.java
@@ -65,8 +65,6 @@ public class BasicPhysicalExecutorVisitor<CONTEXT, RESULT> implements PhysicalEx
       return visitMergeJoin(context, (MergeJoinExec) exec, stack);
     } else if (exec instanceof NLJoinExec) {
       return visitNLJoin(context, (NLJoinExec) exec, stack);
-    } else if (exec instanceof NLLeftOuterJoinExec) {
-      return visitNLLeftOuterJoin(context, (NLLeftOuterJoinExec) exec, stack);
     } else if (exec instanceof ProjectionExec) {
       return visitProjection(context, (ProjectionExec) exec, stack);
     } else if (exec instanceof RangeShuffleFileWriteExec) {
@@ -214,12 +212,6 @@ public class BasicPhysicalExecutorVisitor<CONTEXT, RESULT> implements PhysicalEx
   }
 
   @Override
-  public RESULT visitNLLeftOuterJoin(CONTEXT context, NLLeftOuterJoinExec exec, Stack<PhysicalExec> stack)
-      throws PhysicalPlanningException {
-    return visitBinaryExecutor(context, exec, stack);
-  }
-
-  @Override
   public RESULT visitProjection(CONTEXT context, ProjectionExec exec, Stack<PhysicalExec> stack)
       throws PhysicalPlanningException {
     return visitUnaryExecutor(context, exec, stack);

http://git-wip-us.apache.org/repos/asf/tajo/blob/36a703c5/tajo-core/src/main/java/org/apache/tajo/engine/planner/physical/CommonHashJoinExec.java
----------------------------------------------------------------------
diff --git a/tajo-core/src/main/java/org/apache/tajo/engine/planner/physical/CommonHashJoinExec.java b/tajo-core/src/main/java/org/apache/tajo/engine/planner/physical/CommonHashJoinExec.java
new file mode 100644
index 0000000..ff9b253
--- /dev/null
+++ b/tajo-core/src/main/java/org/apache/tajo/engine/planner/physical/CommonHashJoinExec.java
@@ -0,0 +1,191 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.tajo.engine.planner.physical;
+
+import org.apache.tajo.catalog.Column;
+import org.apache.tajo.catalog.statistics.TableStats;
+import org.apache.tajo.engine.utils.CacheHolder;
+import org.apache.tajo.engine.utils.TableCacheKey;
+import org.apache.tajo.plan.logical.JoinNode;
+import org.apache.tajo.plan.util.PlannerUtil;
+import org.apache.tajo.storage.Tuple;
+import org.apache.tajo.storage.VTuple;
+import org.apache.tajo.worker.ExecutionBlockSharedResource;
+import org.apache.tajo.worker.TaskAttemptContext;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * common exec for all hash join execs
+ *
+ * @param <T> Tuple collection type used to load the small relation into memory
+ */
+public abstract class CommonHashJoinExec<T> extends CommonJoinExec {
+
+  protected final List<Column[]> joinKeyPairs;
+
+  // temporary tuples and states for hash join
+  protected boolean first = true;
+  protected Map<Tuple, T> tupleSlots;
+
+  protected Iterator<Tuple> iterator;
+
+  protected final Tuple keyTuple;
+
+  protected final int rightNumCols;
+  protected final int leftNumCols;
+
+  protected final int[] leftKeyList;
+  protected final int[] rightKeyList;
+
+  protected boolean finished;
+
+  public CommonHashJoinExec(TaskAttemptContext context, JoinNode plan, PhysicalExec outer, PhysicalExec inner) {
+    super(context, plan, outer, inner);
+
+    // HashJoin only can manage equi join key pairs.
+    this.joinKeyPairs = PlannerUtil.getJoinKeyPairs(joinQual, outer.getSchema(),
+        inner.getSchema(), false);
+
+    leftKeyList = new int[joinKeyPairs.size()];
+    rightKeyList = new int[joinKeyPairs.size()];
+
+    for (int i = 0; i < joinKeyPairs.size(); i++) {
+      leftKeyList[i] = outer.getSchema().getColumnId(joinKeyPairs.get(i)[0].getQualifiedName());
+    }
+
+    for (int i = 0; i < joinKeyPairs.size(); i++) {
+      rightKeyList[i] = inner.getSchema().getColumnId(joinKeyPairs.get(i)[1].getQualifiedName());
+    }
+
+    leftNumCols = outer.getSchema().size();
+    rightNumCols = inner.getSchema().size();
+
+    keyTuple = new VTuple(leftKeyList.length);
+  }
+
+  protected void loadRightToHashTable() throws IOException {
+    ScanExec scanExec = PhysicalPlanUtil.findExecutor(rightChild, ScanExec.class);
+    if (scanExec.canBroadcast()) {
+      /* If this table can broadcast, all tasks in a node will share the same cache */
+      TableCacheKey key = CacheHolder.BroadcastCacheHolder.getCacheKey(
+          context, scanExec.getCanonicalName(), scanExec.getFragments());
+      loadRightFromCache(key);
+    } else {
+      this.tupleSlots = convert(buildRightToHashTable(), false);
+    }
+
+    first = false;
+  }
+
+  protected void loadRightFromCache(TableCacheKey key) throws IOException {
+    ExecutionBlockSharedResource sharedResource = context.getSharedResource();
+
+    CacheHolder<Map<Tuple, List<Tuple>>> holder;
+    synchronized (sharedResource.getLock()) {
+      if (sharedResource.hasBroadcastCache(key)) {
+        holder = sharedResource.getBroadcastCache(key);
+      } else {
+        Map<Tuple, List<Tuple>> built = buildRightToHashTable();
+        holder = new CacheHolder.BroadcastCacheHolder(built, rightChild.getInputStats(), null);
+        sharedResource.addBroadcastCache(key, holder);
+      }
+    }
+    this.tupleSlots = convert(holder.getData(), true);
+  }
+
+  protected Map<Tuple, List<Tuple>> buildRightToHashTable() throws IOException {
+    Tuple tuple;
+    Map<Tuple, List<Tuple>> map = new HashMap<Tuple, List<Tuple>>(100000);
+
+    while (!context.isStopped() && (tuple = rightChild.next()) != null) {
+      Tuple keyTuple = new VTuple(joinKeyPairs.size());
+      for (int i = 0; i < rightKeyList.length; i++) {
+        keyTuple.put(i, tuple.get(rightKeyList[i]));
+      }
+
+      List<Tuple> newValue = map.get(keyTuple);
+      if (newValue == null) {
+        map.put(keyTuple, newValue = new ArrayList<Tuple>());
+      }
+      // if source is scan or groupby, it needs not to be cloned
+      newValue.add(new VTuple(tuple));
+    }
+    return map;
+  }
+
+  // todo: convert loaded data to cache condition
+  protected abstract Map<Tuple, T> convert(Map<Tuple, List<Tuple>> hashed, boolean fromCache)
+      throws IOException;
+
+  protected Tuple toKey(final Tuple outerTuple) {
+    for (int i = 0; i < leftKeyList.length; i++) {
+      keyTuple.put(i, outerTuple.get(leftKeyList[i]));
+    }
+    return keyTuple;
+  }
+
+  @Override
+  public void rescan() throws IOException {
+    super.rescan();
+    finished = false;
+    iterator = null;
+  }
+
+  @Override
+  public void close() throws IOException {
+    super.close();
+    iterator = null;
+    if (tupleSlots != null) {
+      tupleSlots.clear();
+      tupleSlots = null;
+    }
+  }
+
+  @Override
+  public TableStats getInputStats() {
+    if (leftChild == null) {
+      return inputStats;
+    }
+    TableStats leftInputStats = leftChild.getInputStats();
+    inputStats.setNumBytes(0);
+    inputStats.setReadBytes(0);
+    inputStats.setNumRows(0);
+
+    if (leftInputStats != null) {
+      inputStats.setNumBytes(leftInputStats.getNumBytes());
+      inputStats.setReadBytes(leftInputStats.getReadBytes());
+      inputStats.setNumRows(leftInputStats.getNumRows());
+    }
+
+    TableStats rightInputStats = rightChild.getInputStats();
+    if (rightInputStats != null) {
+      inputStats.setNumBytes(inputStats.getNumBytes() + rightInputStats.getNumBytes());
+      inputStats.setReadBytes(inputStats.getReadBytes() + rightInputStats.getReadBytes());
+      inputStats.setNumRows(inputStats.getNumRows() + rightInputStats.getNumRows());
+    }
+
+    return inputStats;
+  }
+}

http://git-wip-us.apache.org/repos/asf/tajo/blob/36a703c5/tajo-core/src/main/java/org/apache/tajo/engine/planner/physical/CommonJoinExec.java
----------------------------------------------------------------------
diff --git a/tajo-core/src/main/java/org/apache/tajo/engine/planner/physical/CommonJoinExec.java b/tajo-core/src/main/java/org/apache/tajo/engine/planner/physical/CommonJoinExec.java
index 2535edf..ec29085 100644
--- a/tajo-core/src/main/java/org/apache/tajo/engine/planner/physical/CommonJoinExec.java
+++ b/tajo-core/src/main/java/org/apache/tajo/engine/planner/physical/CommonJoinExec.java
@@ -18,36 +18,178 @@
 
 package org.apache.tajo.engine.planner.physical;
 
+import com.google.common.base.Predicate;
+import com.google.common.collect.Iterators;
+import com.google.common.collect.Lists;
+import org.apache.tajo.catalog.Column;
+import org.apache.tajo.catalog.Schema;
 import org.apache.tajo.catalog.SchemaUtil;
 import org.apache.tajo.engine.planner.Projector;
+import org.apache.tajo.plan.expr.AlgebraicUtil;
+import org.apache.tajo.plan.expr.BinaryEval;
 import org.apache.tajo.plan.expr.EvalNode;
+import org.apache.tajo.plan.expr.EvalTreeUtil;
 import org.apache.tajo.plan.logical.JoinNode;
+import org.apache.tajo.storage.FrameTuple;
+import org.apache.tajo.storage.NullTuple;
+import org.apache.tajo.storage.Tuple;
+import org.apache.tajo.storage.VTuple;
 import org.apache.tajo.worker.TaskAttemptContext;
 
 import java.io.IOException;
+import java.util.Arrays;
+import java.util.Iterator;
+import java.util.LinkedHashSet;
+import java.util.List;
 
-// common join exec except HashLeftOuterJoinExec
+/**
+ * common exec for all join execs
+ */
 public abstract class CommonJoinExec extends BinaryPhysicalExec {
 
   // from logical plan
   protected JoinNode plan;
   protected final boolean hasJoinQual;
 
-  protected EvalNode joinQual;
+  protected EvalNode joinQual;         // ex) a.id = b.id
+  protected EvalNode leftJoinFilter;   // ex) a > 10
+  protected EvalNode rightJoinFilter;  // ex) b > 5
+
+  protected final Schema leftSchema;
+  protected final Schema rightSchema;
+
+  protected final FrameTuple frameTuple;
+  protected final Tuple outTuple;
 
   // projection
   protected Projector projector;
 
-  public CommonJoinExec(TaskAttemptContext context, JoinNode plan, PhysicalExec outer,
-                        PhysicalExec inner) {
+  public CommonJoinExec(TaskAttemptContext context, JoinNode plan, PhysicalExec outer, PhysicalExec inner) {
     super(context, SchemaUtil.merge(outer.getSchema(), inner.getSchema()),
         plan.getOutSchema(), outer, inner);
     this.plan = plan;
-    this.joinQual = plan.getJoinQual();
-    this.hasJoinQual = plan.hasJoinQual();
+    this.leftSchema = outer.getSchema();
+    this.rightSchema = inner.getSchema();
+    if (plan.hasJoinQual()) {
+      EvalNode[] extracted = extractJoinConditions(plan.getJoinQual(), leftSchema, rightSchema);
+      joinQual = extracted[0];
+      leftJoinFilter = extracted[1];
+      rightJoinFilter = extracted[2];
+    }
+    this.hasJoinQual = joinQual != null;
 
     // for projection
     this.projector = new Projector(context, inSchema, outSchema, plan.getTargets());
+
+    // for join
+    this.frameTuple = new FrameTuple();
+    this.outTuple = new VTuple(outSchema.size());
+  }
+
+  /**
+   * It separates a singular CNF-formed join condition into a join condition, a left join filter, and
+   * right join filter.
+   *
+   * @param joinQual the original join condition
+   * @param leftSchema Left table schema
+   * @param rightSchema Right table schema
+   * @return Three element EvalNodes, 0 - join condition, 1 - left join filter, 2 - right join filter.
+   */
+  private EvalNode[] extractJoinConditions(EvalNode joinQual, Schema leftSchema, Schema rightSchema) {
+    List<EvalNode> joinQuals = Lists.newArrayList();
+    List<EvalNode> leftFilters = Lists.newArrayList();
+    List<EvalNode> rightFilters = Lists.newArrayList();
+    for (EvalNode eachQual : AlgebraicUtil.toConjunctiveNormalFormArray(joinQual)) {
+      if (!(eachQual instanceof BinaryEval)) {
+        continue; // todo 'between', etc.
+      }
+      BinaryEval binaryEval = (BinaryEval)eachQual;
+      LinkedHashSet<Column> leftColumns = EvalTreeUtil.findUniqueColumns(binaryEval.getLeftExpr());
+      LinkedHashSet<Column> rightColumns = EvalTreeUtil.findUniqueColumns(binaryEval.getRightExpr());
+      boolean leftInLeft = leftSchema.containsAny(leftColumns);
+      boolean rightInLeft = leftSchema.containsAny(rightColumns);
+      boolean leftInRight = rightSchema.containsAny(leftColumns);
+      boolean rightInRight = rightSchema.containsAny(rightColumns);
+
+      boolean columnsFromLeft = leftInLeft || rightInLeft;
+      boolean columnsFromRight = leftInRight || rightInRight;
+      if (!columnsFromLeft && !columnsFromRight) {
+        continue; // todo constant expression : this should be done in logical phase
+      }
+      if (columnsFromLeft ^ columnsFromRight) {
+        if (columnsFromLeft) {
+          leftFilters.add(eachQual);
+        } else {
+          rightFilters.add(eachQual);
+        }
+        continue;
+      }
+      if ((leftInLeft && rightInLeft) || (leftInRight && rightInRight)) {
+        continue; // todo not allowed yet : this should be checked in logical phase
+      }
+      joinQuals.add(eachQual);
+    }
+    return new EvalNode[] {
+        joinQuals.isEmpty() ? null : AlgebraicUtil.createSingletonExprFromCNF(joinQuals),
+        leftFilters.isEmpty() ? null : AlgebraicUtil.createSingletonExprFromCNF(leftFilters),
+        rightFilters.isEmpty() ? null : AlgebraicUtil.createSingletonExprFromCNF(rightFilters)
+    };
+  }
+
+  public JoinNode getPlan() {
+    return plan;
+  }
+
+  /**
+   * Evaluate an input tuple with a left join filter
+   *
+   * @param left Tuple to be evaluated
+   * @return True if the input tuple does not satisfy the left join filter (i.e., it is filtered out)
+   */
+  protected boolean leftFiltered(Tuple left) {
+    return leftJoinFilter != null && !leftJoinFilter.eval(left).asBool();
+  }
+
+  /**
+   * Evaluate an input tuple with a right join filter
+   *
+   * @param right Tuple to be evaluated
+   * @return True if the input tuple does not satisfy the right join filter (i.e., it is filtered out)
+   */
+  protected boolean rightFiltered(Tuple right) {
+    return rightJoinFilter != null && !rightJoinFilter.eval(right).asBool();
+  }
+
+  /**
+   * Returns a tuple iterator that filters rows of the right table by using the right join filter.
+   * It must take rows of the right table.
+   *
+   * @param rightTuples Iterable of right-table tuples
+   * @return rows filtered by the right join filter
+   */
+  protected Iterator<Tuple> rightFiltered(Iterable<Tuple> rightTuples) {
+    if (rightTuples == null) {
+      return Iterators.emptyIterator();
+    }
+    if (rightJoinFilter == null) {
+      return rightTuples.iterator();
+    }
+    return Iterators.filter(rightTuples.iterator(), new Predicate<Tuple>() {
+      @Override
+      public boolean apply(Tuple input) {
+        return rightJoinFilter.eval(input).asBool();
+      }
+    });
+  }
+
+  /**
+   * Returns a tuple iterator containing a single NullTuple
+   *
+   * @param width the width of the tuple
+   * @return a tuple iterator containing a single NullTuple
+   */
+  protected Iterator<Tuple> nullIterator(int width) {
+    return Arrays.asList(NullTuple.create(width)).iterator();
   }
 
   @Override
@@ -56,6 +198,12 @@ public abstract class CommonJoinExec extends BinaryPhysicalExec {
     if (hasJoinQual) {
       joinQual.bind(context.getEvalContext(), inSchema);
     }
+    if (leftJoinFilter != null) {
+      leftJoinFilter.bind(context.getEvalContext(), leftSchema);
+    }
+    if (rightJoinFilter != null) {
+      rightJoinFilter.bind(context.getEvalContext(), rightSchema);
+    }
   }
 
   @Override
@@ -63,10 +211,7 @@ public abstract class CommonJoinExec extends BinaryPhysicalExec {
     if (hasJoinQual) {
       joinQual = context.getPrecompiledEval(inSchema, joinQual);
     }
-  }
-
-  public JoinNode getPlan() {
-    return plan;
+    // compile filters?
   }
 
   @Override
@@ -74,6 +219,13 @@ public abstract class CommonJoinExec extends BinaryPhysicalExec {
     super.close();
     plan = null;
     joinQual = null;
+    leftJoinFilter = null;
+    rightJoinFilter = null;
     projector = null;
   }
+
+  @Override
+  public String toString() {
+    return getClass().getSimpleName() + " [" + leftSchema + " : " + rightSchema + "]";
+  }
 }

http://git-wip-us.apache.org/repos/asf/tajo/blob/36a703c5/tajo-core/src/main/java/org/apache/tajo/engine/planner/physical/HashFullOuterJoinExec.java
----------------------------------------------------------------------
diff --git a/tajo-core/src/main/java/org/apache/tajo/engine/planner/physical/HashFullOuterJoinExec.java b/tajo-core/src/main/java/org/apache/tajo/engine/planner/physical/HashFullOuterJoinExec.java
index 6e28ae0..1645263 100644
--- a/tajo-core/src/main/java/org/apache/tajo/engine/planner/physical/HashFullOuterJoinExec.java
+++ b/tajo-core/src/main/java/org/apache/tajo/engine/planner/physical/HashFullOuterJoinExec.java
@@ -18,101 +18,59 @@
 
 package org.apache.tajo.engine.planner.physical;
 
-import org.apache.tajo.catalog.Column;
-import org.apache.tajo.engine.utils.TupleUtil;
-import org.apache.tajo.plan.util.PlannerUtil;
 import org.apache.tajo.plan.logical.JoinNode;
-import org.apache.tajo.storage.FrameTuple;
+import org.apache.tajo.storage.NullTuple;
 import org.apache.tajo.storage.Tuple;
-import org.apache.tajo.storage.VTuple;
+import org.apache.tajo.util.Pair;
 import org.apache.tajo.worker.TaskAttemptContext;
 
 import java.io.IOException;
 import java.util.*;
 
+public class HashFullOuterJoinExec extends CommonHashJoinExec<Pair<Boolean, List<Tuple>>> {
 
-public class HashFullOuterJoinExec extends CommonJoinExec {
-
-  protected List<Column[]> joinKeyPairs;
-
-  // temporal tuples and states for nested loop join
-  protected boolean first = true;
-  protected FrameTuple frameTuple;
-  protected Tuple outTuple = null;
-  protected Map<Tuple, List<Tuple>> tupleSlots;
-  protected Iterator<Tuple> iterator = null;
-  protected Tuple leftTuple;
-  protected Tuple leftKeyTuple;
-
-  protected int [] leftKeyList;
-  protected int [] rightKeyList;
-
-  protected boolean finished = false;
-  protected boolean shouldGetLeftTuple = true;
-
-  private int rightNumCols;
-  private int leftNumCols;
-  private Map<Tuple, Boolean> matched;
+  private boolean finalLoop; // final loop for right unmatched
 
   public HashFullOuterJoinExec(TaskAttemptContext context, JoinNode plan, PhysicalExec outer,
                                PhysicalExec inner) {
     super(context, plan, outer, inner);
-    this.tupleSlots = new HashMap<Tuple, List<Tuple>>(10000);
-
-    // this hashmap mirrors the evolution of the tupleSlots, with the same keys. For each join key,
-    // we have a boolean flag, initially false (whether this join key had at least one match on the left operand)
-    this.matched = new HashMap<Tuple, Boolean>(10000);
-
-    // HashJoin only can manage equi join key pairs.
-    this.joinKeyPairs = PlannerUtil.getJoinKeyPairs(joinQual, outer.getSchema(), inner.getSchema(),
-        false);
-
-    leftKeyList = new int[joinKeyPairs.size()];
-    rightKeyList = new int[joinKeyPairs.size()];
-
-    for (int i = 0; i < joinKeyPairs.size(); i++) {
-      leftKeyList[i] = outer.getSchema().getColumnId(joinKeyPairs.get(i)[0].getQualifiedName());
-    }
-
-    for (int i = 0; i < joinKeyPairs.size(); i++) {
-      rightKeyList[i] = inner.getSchema().getColumnId(joinKeyPairs.get(i)[1].getQualifiedName());
-    }
-
-    // for join
-    frameTuple = new FrameTuple();
-    outTuple = new VTuple(outSchema.size());
-    leftKeyTuple = new VTuple(leftKeyList.length);
-
-    leftNumCols = outer.getSchema().size();
-    rightNumCols = inner.getSchema().size();
   }
 
-  protected void getKeyLeftTuple(final Tuple outerTuple, Tuple keyTuple) {
-    for (int i = 0; i < leftKeyList.length; i++) {
-      keyTuple.put(i, outerTuple.get(leftKeyList[i]));
-    }
-  }
+  public Iterator<Tuple> getUnmatchedRight() {
 
-  public Tuple getNextUnmatchedRight() {
+    return new Iterator<Tuple>() {
 
-    List<Tuple> newValue;
-    Tuple returnedTuple;
-    // get a keyTUple from the matched hashmap with a boolean false value
-    for(Tuple aKeyTuple : matched.keySet()) {
-      if(matched.get(aKeyTuple) == false) {
-        newValue = tupleSlots.get(aKeyTuple);
-        returnedTuple = newValue.remove(0);
-        tupleSlots.put(aKeyTuple, newValue);
+      private Iterator<Pair<Boolean, List<Tuple>>> iterator1 = tupleSlots.values().iterator();
+      private Iterator<Tuple> iterator2;
 
-        // after taking the last element from the list in tupleSlots, set flag true in matched as well
-        if(newValue.isEmpty()){
-          matched.put(aKeyTuple, true);
+      @Override
+      public boolean hasNext() {
+        if (hasMore()) {
+          return true;
         }
+        for (iterator2 = null; !hasMore() && iterator1.hasNext();) {
+          Pair<Boolean, List<Tuple>> next = iterator1.next();
+          if (!next.getFirst()) {
+            iterator2 = next.getSecond().iterator();
+          }
+        }
+        return hasMore();
+      }
 
-        return returnedTuple;
+      private boolean hasMore() {
+        return iterator2 != null && iterator2.hasNext();
       }
-    }
-    return null;
+
+      @Override
+      public Tuple next() {
+        return iterator2.next();
+      }
+
+      @Override
+      public void remove() {
+        throw new UnsupportedOperationException("remove");
+      }
+    };
   }
 
   public Tuple next() throws IOException {
@@ -120,112 +78,67 @@ public class HashFullOuterJoinExec extends CommonJoinExec {
       loadRightToHashTable();
     }
 
-    Tuple rightTuple;
-    boolean found = false;
-
-    while(!context.isStopped() && !finished) {
-      if (shouldGetLeftTuple) { // initially, it is true.
-        // getting new outer
-        leftTuple = leftChild.next(); // it comes from a disk
-        if (leftTuple == null) { // if no more tuples in left tuples on disk, a join is completed.
-          // in this stage we can begin outputing tuples from the right operand (which were before in tupleSlots) null padded on the left side
-          Tuple unmatchedRightTuple = getNextUnmatchedRight();
-          if( unmatchedRightTuple == null) {
-            finished = true;
-            outTuple = null;
-            return null;
-          } else {
-            Tuple nullPaddedTuple = TupleUtil.createNullPaddedTuple(leftNumCols);
-            frameTuple.set(nullPaddedTuple, unmatchedRightTuple);
-            projector.eval(frameTuple, outTuple);
-
-            return outTuple;
-          }
-        }
-
-        // getting corresponding right
-        getKeyLeftTuple(leftTuple, leftKeyTuple); // get a left key tuple
-        List<Tuple> rightTuples = tupleSlots.get(leftKeyTuple);
-        if (rightTuples != null) { // found right tuples on in-memory hash table.
-          iterator = rightTuples.iterator();
-          shouldGetLeftTuple = false;
-        } else {
-          //this left tuple doesn't have a match on the right.But full outer join => we should keep it anyway
-          //output a tuple with the nulls padded rightTuple
-          Tuple nullPaddedTuple = TupleUtil.createNullPaddedTuple(rightNumCols);
-          frameTuple.set(leftTuple, nullPaddedTuple);
-          projector.eval(frameTuple, outTuple);
-          // we simulate we found a match, which is exactly the null padded one
-          shouldGetLeftTuple = true;
-          return outTuple;
-        }
-      }
-
-      // getting a next right tuple on in-memory hash table.
-      rightTuple = iterator.next();
-      frameTuple.set(leftTuple, rightTuple); // evaluate a join condition on both tuples
-
-      if (joinQual.eval(frameTuple).isTrue()) { // if both tuples are joinable
+    while (!context.isStopped() && !finished) {
+      if (iterator != null && iterator.hasNext()) {
+        frameTuple.setRight(iterator.next());
         projector.eval(frameTuple, outTuple);
-        found = true;
-        getKeyLeftTuple(leftTuple, leftKeyTuple);
-        matched.put(leftKeyTuple, true);
+        return outTuple;
       }
-
-      if (!iterator.hasNext()) { // no more right tuples for this hash key
-        shouldGetLeftTuple = true;
+      if (finalLoop) {
+        finished = true;
+        return null;
       }
-
-      if (found) {
-        break;
+      Tuple leftTuple = leftChild.next();
+      if (leftTuple == null) {
+        // if no more tuples in the left relation, the join is completed;
+        // at this stage we can begin outputting tuples from the right operand (which were before in tupleSlots), null padded on the left side
+        frameTuple.setLeft(NullTuple.create(leftNumCols));
+        iterator = getUnmatchedRight();
+        finalLoop = true;
+        continue;
       }
-    }
-    return outTuple;
-  }
-
-  protected void loadRightToHashTable() throws IOException {
-    Tuple tuple;
-    Tuple keyTuple;
+      frameTuple.setLeft(leftTuple);
 
-    while (!context.isStopped() && (tuple = rightChild.next()) != null) {
-      keyTuple = new VTuple(joinKeyPairs.size());
-      for (int i = 0; i < rightKeyList.length; i++) {
-        keyTuple.put(i, tuple.get(rightKeyList[i]));
+      if (leftFiltered(leftTuple)) {
+        iterator = nullIterator(rightNumCols);
+        continue;
       }
-
-      List<Tuple> newValue = tupleSlots.get(keyTuple);
-      if (newValue != null) {
-        newValue.add(tuple);
-      } else {
-        newValue = new ArrayList<Tuple>();
-        newValue.add(tuple);
-        tupleSlots.put(keyTuple, newValue);
-        matched.put(keyTuple,false);
+      // getting corresponding right
+      Pair<Boolean, List<Tuple>> hashed = tupleSlots.get(toKey(leftTuple));
+      if (hashed == null) {
+        iterator = nullIterator(rightNumCols);
+        continue;
+      }
+      Iterator<Tuple> rightTuples = rightFiltered(hashed.getSecond());
+      if (!rightTuples.hasNext()) {
+        iterator = nullIterator(rightNumCols);
+        continue;
       }
+      iterator = rightTuples;
+      hashed.setFirst(true);   // match found
     }
-    first = false;
+
+    return null;
   }
 
   @Override
-  public void rescan() throws IOException {
-    super.rescan();
-
-    tupleSlots.clear();
-    first = true;
-
-    finished = false;
-    iterator = null;
-    shouldGetLeftTuple = true;
+  protected Map<Tuple, Pair<Boolean, List<Tuple>>> convert(Map<Tuple, List<Tuple>> hashed,
+                                                           boolean fromCache) throws IOException {
+    Map<Tuple, Pair<Boolean, List<Tuple>>> tuples = new HashMap<Tuple, Pair<Boolean, List<Tuple>>>(hashed.size());
+    for (Map.Entry<Tuple, List<Tuple>> entry : hashed.entrySet()) {
+      // flag: initially false (whether this join key had at least one match on the counter part)
+      tuples.put(entry.getKey(), new Pair<Boolean, List<Tuple>>(false, entry.getValue()));
+    }
+    return tuples;
   }
 
   @Override
-  public void close() throws IOException {
-    super.close();
-    tupleSlots.clear();
-    matched.clear();
-    tupleSlots = null;
-    matched = null;
-    iterator = null;
+  public void rescan() throws IOException {
+    super.rescan();
+    for (Pair<Boolean, List<Tuple>> value : tupleSlots.values()) {
+      value.setFirst(false);
+    }
+    finalLoop = false;
   }
 }
 

http://git-wip-us.apache.org/repos/asf/tajo/blob/36a703c5/tajo-core/src/main/java/org/apache/tajo/engine/planner/physical/HashJoinExec.java
----------------------------------------------------------------------
diff --git a/tajo-core/src/main/java/org/apache/tajo/engine/planner/physical/HashJoinExec.java b/tajo-core/src/main/java/org/apache/tajo/engine/planner/physical/HashJoinExec.java
index 48f3682..a4215fa 100644
--- a/tajo-core/src/main/java/org/apache/tajo/engine/planner/physical/HashJoinExec.java
+++ b/tajo-core/src/main/java/org/apache/tajo/engine/planner/physical/HashJoinExec.java
@@ -18,225 +18,59 @@
 
 package org.apache.tajo.engine.planner.physical;
 
-import org.apache.tajo.catalog.Column;
-import org.apache.tajo.catalog.statistics.TableStats;
-import org.apache.tajo.engine.utils.CacheHolder;
-import org.apache.tajo.engine.utils.TableCacheKey;
 import org.apache.tajo.plan.logical.JoinNode;
-import org.apache.tajo.plan.util.PlannerUtil;
-import org.apache.tajo.storage.FrameTuple;
 import org.apache.tajo.storage.Tuple;
-import org.apache.tajo.storage.VTuple;
-import org.apache.tajo.worker.ExecutionBlockSharedResource;
 import org.apache.tajo.worker.TaskAttemptContext;
 
 import java.io.IOException;
 import java.util.*;
 
-public class HashJoinExec extends CommonJoinExec {
-
-  protected List<Column[]> joinKeyPairs;
-
-  // temporal tuples and states for nested loop join
-  protected boolean first = true;
-  protected FrameTuple frameTuple;
-  protected Tuple outTuple = null;
-  protected Map<Tuple, List<Tuple>> tupleSlots;
-  protected Iterator<Tuple> iterator = null;
-  protected Tuple leftTuple;
-  protected Tuple leftKeyTuple;
-
-  protected int [] leftKeyList;
-  protected int [] rightKeyList;
-
-  protected boolean finished = false;
-  protected boolean shouldGetLeftTuple = true;
-
-  private TableStats cachedRightTableStats;
+public class HashJoinExec extends CommonHashJoinExec<List<Tuple>> {
 
   public HashJoinExec(TaskAttemptContext context, JoinNode plan, PhysicalExec leftExec,
       PhysicalExec rightExec) {
     super(context, plan, leftExec, rightExec);
-
-    // HashJoin only can manage equi join key pairs.
-    this.joinKeyPairs = PlannerUtil.getJoinKeyPairs(joinQual, leftExec.getSchema(),
-        rightExec.getSchema(), false);
-
-    leftKeyList = new int[joinKeyPairs.size()];
-    rightKeyList = new int[joinKeyPairs.size()];
-
-    for (int i = 0; i < joinKeyPairs.size(); i++) {
-      leftKeyList[i] = leftExec.getSchema().getColumnId(joinKeyPairs.get(i)[0].getQualifiedName());
-    }
-
-    for (int i = 0; i < joinKeyPairs.size(); i++) {
-      rightKeyList[i] = rightExec.getSchema().getColumnId(joinKeyPairs.get(i)[1].getQualifiedName());
-    }
-
-    // for join
-    frameTuple = new FrameTuple();
-    outTuple = new VTuple(outSchema.size());
-    leftKeyTuple = new VTuple(leftKeyList.length);
   }
 
-  protected void getKeyLeftTuple(final Tuple outerTuple, Tuple keyTuple) {
-    for (int i = 0; i < leftKeyList.length; i++) {
-      keyTuple.put(i, outerTuple.get(leftKeyList[i]));
-    }
+  @Override
+  protected Map<Tuple, List<Tuple>> convert(Map<Tuple, List<Tuple>> hashed, boolean fromCache)
+      throws IOException {
+    return fromCache ? new HashMap<Tuple, List<Tuple>>(hashed) : hashed;
   }
 
+  @Override
   public Tuple next() throws IOException {
     if (first) {
       loadRightToHashTable();
     }
 
-    Tuple rightTuple;
-    boolean found = false;
-
-    while(!context.isStopped() && !finished) {
-      if (shouldGetLeftTuple) { // initially, it is true.
-        // getting new outer
-        leftTuple = leftChild.next(); // it comes from a disk
-        if (leftTuple == null) { // if no more tuples in left tuples on disk, a join is completed.
-          finished = true;
-          return null;
-        }
-
-        // getting corresponding right
-        getKeyLeftTuple(leftTuple, leftKeyTuple); // get a left key tuple
-        List<Tuple> rightTuples = tupleSlots.get(leftKeyTuple);
-        if (rightTuples != null) { // found right tuples on in-memory hash table.
-          iterator = rightTuples.iterator();
-          shouldGetLeftTuple = false;
-        } else {
-          shouldGetLeftTuple = true;
-          continue;
-        }
-      }
-
-      // getting a next right tuple on in-memory hash table.
-      rightTuple = iterator.next();
-      frameTuple.set(leftTuple, rightTuple); // evaluate a join condition on both tuples
-      if (joinQual.eval(frameTuple).isTrue()) { // if both tuples are joinable
+    while (!context.isStopped() && !finished) {
+      if (iterator != null && iterator.hasNext()) {
+        frameTuple.setRight(iterator.next());
         projector.eval(frameTuple, outTuple);
-        found = true;
-      }
-
-      if (!iterator.hasNext()) { // no more right tuples for this hash key
-        shouldGetLeftTuple = true;
-      }
-
-      if (found) {
-        break;
-      }
-    }
-
-    return new VTuple(outTuple);
-  }
-
-  protected void loadRightToHashTable() throws IOException {
-    ScanExec scanExec = PhysicalPlanUtil.findExecutor(rightChild, ScanExec.class);
-    if (scanExec.canBroadcast()) {
-      /* If this table can broadcast, all tasks in a node will share the same cache */
-      TableCacheKey key = CacheHolder.BroadcastCacheHolder.getCacheKey(
-          context, scanExec.getCanonicalName(), scanExec.getFragments());
-      loadRightFromCache(key);
-    } else {
-      this.tupleSlots = buildRightToHashTable();
-    }
-
-    first = false;
-  }
-
-  protected void loadRightFromCache(TableCacheKey key) throws IOException {
-    ExecutionBlockSharedResource sharedResource = context.getSharedResource();
-    synchronized (sharedResource.getLock()) {
-      if (sharedResource.hasBroadcastCache(key)) {
-        CacheHolder<Map<Tuple, List<Tuple>>> data = sharedResource.getBroadcastCache(key);
-        this.tupleSlots = data.getData();
-        this.cachedRightTableStats = data.getTableStats();
-      } else {
-        CacheHolder.BroadcastCacheHolder holder =
-            new CacheHolder.BroadcastCacheHolder(buildRightToHashTable(), rightChild.getInputStats(), null);
-        sharedResource.addBroadcastCache(key, holder);
-        CacheHolder<Map<Tuple, List<Tuple>>> data = sharedResource.getBroadcastCache(key);
-        this.tupleSlots = data.getData();
-        this.cachedRightTableStats = data.getTableStats();
+        return outTuple;
       }
-    }
-  }
-
-  private Map<Tuple, List<Tuple>> buildRightToHashTable() throws IOException {
-    Tuple tuple;
-    Tuple keyTuple;
-    Map<Tuple, List<Tuple>> map = new HashMap<Tuple, List<Tuple>>(100000);
-
-    while (!context.isStopped() && (tuple = rightChild.next()) != null) {
-      keyTuple = new VTuple(joinKeyPairs.size());
-      for (int i = 0; i < rightKeyList.length; i++) {
-        keyTuple.put(i, tuple.get(rightKeyList[i]));
+      Tuple leftTuple = leftChild.next(); // it comes from a disk
+      if (leftTuple == null || leftFiltered(leftTuple)) { // finish when the left side is exhausted; skip tuples rejected by the left filter
+        finished = leftTuple == null;
+        continue;
       }
 
-      List<Tuple> newValue = map.get(keyTuple);
+      frameTuple.setLeft(leftTuple);
 
-      if (newValue != null) {
-        newValue.add(tuple);
-      } else {
-        newValue = new ArrayList<Tuple>();
-        newValue.add(tuple);
-        map.put(keyTuple, newValue);
+      // getting corresponding right
+      Iterable<Tuple> hashed = getRights(toKey(leftTuple));
+      Iterator<Tuple> rightTuples = rightFiltered(hashed);
+      if (rightTuples.hasNext()) {
+        iterator = rightTuples;
       }
     }
 
-    return map;
-  }
-
-  @Override
-  public void rescan() throws IOException {
-    super.rescan();
-
-    tupleSlots.clear();
-    first = true;
-
-    finished = false;
-    iterator = null;
-    shouldGetLeftTuple = true;
+    return null;
   }
 
-  @Override
-  public void close() throws IOException {
-    super.close();
-    if (tupleSlots != null) {
-      tupleSlots.clear();
-      tupleSlots = null;
-    }
-
-    iterator = null;
+  private Iterable<Tuple> getRights(Tuple key) {
+    return tupleSlots.get(key);
   }
 
-  @Override
-  public TableStats getInputStats() {
-    if (leftChild == null) {
-      return inputStats;
-    }
-    TableStats leftInputStats = leftChild.getInputStats();
-    inputStats.setNumBytes(0);
-    inputStats.setReadBytes(0);
-    inputStats.setNumRows(0);
-
-    if (leftInputStats != null) {
-      inputStats.setNumBytes(leftInputStats.getNumBytes());
-      inputStats.setReadBytes(leftInputStats.getReadBytes());
-      inputStats.setNumRows(leftInputStats.getNumRows());
-    }
-
-    TableStats rightInputStats = cachedRightTableStats == null ? rightChild.getInputStats() : cachedRightTableStats;
-    if (rightInputStats != null) {
-      inputStats.setNumBytes(inputStats.getNumBytes() + rightInputStats.getNumBytes());
-      inputStats.setReadBytes(inputStats.getReadBytes() + rightInputStats.getReadBytes());
-      inputStats.setNumRows(inputStats.getNumRows() + rightInputStats.getNumRows());
-    }
-
-    return inputStats;
-  }
 }

http://git-wip-us.apache.org/repos/asf/tajo/blob/36a703c5/tajo-core/src/main/java/org/apache/tajo/engine/planner/physical/HashLeftAntiJoinExec.java
----------------------------------------------------------------------
diff --git a/tajo-core/src/main/java/org/apache/tajo/engine/planner/physical/HashLeftAntiJoinExec.java b/tajo-core/src/main/java/org/apache/tajo/engine/planner/physical/HashLeftAntiJoinExec.java
index 881bf84..8239270 100644
--- a/tajo-core/src/main/java/org/apache/tajo/engine/planner/physical/HashLeftAntiJoinExec.java
+++ b/tajo-core/src/main/java/org/apache/tajo/engine/planner/physical/HashLeftAntiJoinExec.java
@@ -19,10 +19,8 @@
 package org.apache.tajo.engine.planner.physical;
 
 import org.apache.tajo.worker.TaskAttemptContext;
-import org.apache.tajo.datum.NullDatum;
 import org.apache.tajo.plan.logical.JoinNode;
 import org.apache.tajo.storage.Tuple;
-import org.apache.tajo.storage.VTuple;
 
 import java.io.IOException;
 import java.util.List;
@@ -33,16 +31,10 @@ import java.util.List;
  * If not found, it returns the tuple of the FROM side table with null padding.
  */
 public class HashLeftAntiJoinExec extends HashJoinExec {
-  private Tuple rightNullTuple;
 
   public HashLeftAntiJoinExec(TaskAttemptContext context, JoinNode plan, PhysicalExec fromSideChild,
                               PhysicalExec notInSideChild) {
     super(context, plan, fromSideChild, notInSideChild);
-    // NUll Tuple
-    rightNullTuple = new VTuple(leftChild.outColumnNum);
-    for (int i = 0; i < leftChild.outColumnNum; i++) {
-      rightNullTuple.put(i, NullDatum.get());
-    }
   }
 
   /**
@@ -56,54 +48,33 @@ public class HashLeftAntiJoinExec extends HashJoinExec {
    * @return The tuple which is unmatched to a given join condition.
    * @throws IOException
    */
+  @Override
   public Tuple next() throws IOException {
     if (first) {
       loadRightToHashTable();
     }
 
-    Tuple rightTuple;
-    boolean notFound;
-
     while(!context.isStopped() && !finished) {
-
-      // getting new outer
-      leftTuple = leftChild.next(); // it comes from a disk
-      if (leftTuple == null) { // if no more tuples in left tuples on disk, a join is completed.
-        finished = true;
-        return null;
-      }
-
-      // Try to find a hash bucket in in-memory hash table
-      getKeyLeftTuple(leftTuple, leftKeyTuple);
-      List<Tuple> rightTuples = tupleSlots.get(leftKeyTuple);
-      if (rightTuples != null) {
-        // if found, it gets a hash bucket from the hash table.
-        iterator = rightTuples.iterator();
-      } else {
-        // if not found, it returns a tuple.
-        frameTuple.set(leftTuple, rightNullTuple);
+      if (iterator != null && iterator.hasNext()) {
+        frameTuple.setRight(iterator.next());
         projector.eval(frameTuple, outTuple);
         return outTuple;
       }
-
-      // Reach here only when a hash bucket is found. Then, it checks all tuples in the found bucket.
-      // If it finds a matched tuple, it escapes the loop for all tuples in the hash bucket.
-      notFound = true;
-      while (!context.isStopped() && notFound && iterator.hasNext()) {
-        rightTuple = iterator.next();
-        frameTuple.set(leftTuple, rightTuple);
-        if (joinQual.eval(frameTuple).isTrue()) { // if the matched one is found
-          notFound = false;
-        }
+      // getting new outer
+      Tuple leftTuple = leftChild.next(); // it comes from a disk
+      if (leftTuple == null || leftFiltered(leftTuple)) { // if no more tuples in left tuples on disk, a join is completed.
+        finished = leftTuple == null;
+        continue;
       }
 
-      if (notFound) { // if there is no matched tuple
-        frameTuple.set(leftTuple, rightNullTuple);
-        projector.eval(frameTuple, outTuple);
-        break;
+      frameTuple.setLeft(leftTuple);
+
+      // Try to find a hash bucket in in-memory hash table
+      List<Tuple> hashed = tupleSlots.get(toKey(leftTuple));
+      if (hashed == null || !rightFiltered(hashed).hasNext()) {
+        iterator = nullIterator(0);
       }
     }
-
-    return outTuple;
+    return null;
   }
 }

http://git-wip-us.apache.org/repos/asf/tajo/blob/36a703c5/tajo-core/src/main/java/org/apache/tajo/engine/planner/physical/HashLeftOuterJoinExec.java
----------------------------------------------------------------------
diff --git a/tajo-core/src/main/java/org/apache/tajo/engine/planner/physical/HashLeftOuterJoinExec.java b/tajo-core/src/main/java/org/apache/tajo/engine/planner/physical/HashLeftOuterJoinExec.java
index 6f573d0..8613eac 100644
--- a/tajo-core/src/main/java/org/apache/tajo/engine/planner/physical/HashLeftOuterJoinExec.java
+++ b/tajo-core/src/main/java/org/apache/tajo/engine/planner/physical/HashLeftOuterJoinExec.java
@@ -18,307 +18,61 @@
 
 package org.apache.tajo.engine.planner.physical;
 
-import com.google.common.collect.Lists;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.tajo.catalog.Column;
-import org.apache.tajo.catalog.statistics.TableStats;
-import org.apache.tajo.engine.planner.Projector;
-import org.apache.tajo.engine.utils.CacheHolder;
-import org.apache.tajo.engine.utils.TableCacheKey;
-import org.apache.tajo.engine.utils.TupleUtil;
-import org.apache.tajo.plan.util.PlannerUtil;
-import org.apache.tajo.catalog.SchemaUtil;
-import org.apache.tajo.plan.expr.AlgebraicUtil;
-import org.apache.tajo.plan.expr.EvalNode;
-import org.apache.tajo.plan.expr.EvalTreeUtil;
 import org.apache.tajo.plan.logical.JoinNode;
-import org.apache.tajo.storage.FrameTuple;
 import org.apache.tajo.storage.Tuple;
-import org.apache.tajo.storage.VTuple;
-import org.apache.tajo.worker.ExecutionBlockSharedResource;
 import org.apache.tajo.worker.TaskAttemptContext;
 
 import java.io.IOException;
 import java.util.*;
 
+public class HashLeftOuterJoinExec extends HashJoinExec {
 
-public class HashLeftOuterJoinExec extends BinaryPhysicalExec {
-  // from logical plan
-  protected JoinNode plan;
-  protected EvalNode joinQual;   // ex) a.id = b.id
-  protected EvalNode joinFilter; // ex) a > 10
-
-  protected List<Column[]> joinKeyPairs;
-
-  // temporal tuples and states for nested loop join
-  protected boolean first = true;
-  protected FrameTuple frameTuple;
-  protected Tuple outTuple = null;
-  protected Map<Tuple, List<Tuple>> tupleSlots;
-  protected Iterator<Tuple> iterator = null;
-  protected Tuple leftTuple;
-  protected Tuple leftKeyTuple;
-
-  protected int [] leftKeyList;
-  protected int [] rightKeyList;
-
-  protected boolean finished = false;
-  protected boolean shouldGetLeftTuple = true;
-
-  // projection
-  protected Projector projector;
-
-  private int rightNumCols;
-  private TableStats cachedRightTableStats;
   private static final Log LOG = LogFactory.getLog(HashLeftOuterJoinExec.class);
 
   public HashLeftOuterJoinExec(TaskAttemptContext context, JoinNode plan, PhysicalExec leftChild,
                                PhysicalExec rightChild) {
-    super(context, SchemaUtil.merge(leftChild.getSchema(), rightChild.getSchema()),
-        plan.getOutSchema(), leftChild, rightChild);
-    this.plan = plan;
-
-    List<EvalNode> joinQuals = Lists.newArrayList();
-    List<EvalNode> joinFilters = Lists.newArrayList();
-    for (EvalNode eachQual : AlgebraicUtil.toConjunctiveNormalFormArray(plan.getJoinQual())) {
-      if (EvalTreeUtil.isJoinQual(eachQual, true)) {
-        joinQuals.add(eachQual);
-      } else {
-        joinFilters.add(eachQual);
-      }
-    }
-
-    this.joinQual = AlgebraicUtil.createSingletonExprFromCNF(joinQuals.toArray(new EvalNode[joinQuals.size()]));
-    if (joinFilters.size() > 0) {
-      this.joinFilter = AlgebraicUtil.createSingletonExprFromCNF(joinFilters.toArray(new EvalNode[joinFilters.size()]));
-    } else {
-      this.joinFilter = null;
-    }
-
-    // HashJoin only can manage equi join key pairs.
-    this.joinKeyPairs = PlannerUtil.getJoinKeyPairs(joinQual, leftChild.getSchema(),
-        rightChild.getSchema(), false);
-
-    leftKeyList = new int[joinKeyPairs.size()];
-    rightKeyList = new int[joinKeyPairs.size()];
-
-    for (int i = 0; i < joinKeyPairs.size(); i++) {
-      leftKeyList[i] = leftChild.getSchema().getColumnId(joinKeyPairs.get(i)[0].getQualifiedName());
-    }
-
-    for (int i = 0; i < joinKeyPairs.size(); i++) {
-      rightKeyList[i] = rightChild.getSchema().getColumnId(joinKeyPairs.get(i)[1].getQualifiedName());
-    }
-
-    // for projection
-    this.projector = new Projector(context, inSchema, outSchema, plan.getTargets());
-
-    // for join
-    frameTuple = new FrameTuple();
-    outTuple = new VTuple(outSchema.size());
-    leftKeyTuple = new VTuple(leftKeyList.length);
-
-    rightNumCols = rightChild.getSchema().size();
-
-    joinQual.bind(context.getEvalContext(), inSchema);
-    if (joinFilter != null) {
-      joinFilter.bind(context.getEvalContext(), inSchema);
-    }
+    super(context, plan, leftChild, rightChild);
   }
 
   @Override
-  protected void compile() {
-    joinQual = context.getPrecompiledEval(inSchema, joinQual);
-  }
-
-  protected void getKeyLeftTuple(final Tuple outerTuple, Tuple keyTuple) {
-    for (int i = 0; i < leftKeyList.length; i++) {
-      keyTuple.put(i, outerTuple.get(leftKeyList[i]));
-    }
-  }
-
   public Tuple next() throws IOException {
     if (first) {
       loadRightToHashTable();
     }
 
-    Tuple rightTuple;
-    boolean found = false;
-
-    while(!context.isStopped() && !finished) {
-
-      if (shouldGetLeftTuple) { // initially, it is true.
-        // getting new outer
-        leftTuple = leftChild.next(); // it comes from a disk
-        if (leftTuple == null) { // if no more tuples in left tuples on disk, a join is completed.
-          finished = true;
-          return null;
-        }
-
-        // getting corresponding right
-        getKeyLeftTuple(leftTuple, leftKeyTuple); // get a left key tuple
-        List<Tuple> rightTuples = tupleSlots.get(leftKeyTuple);
-        if (rightTuples != null) { // found right tuples on in-memory hash table.
-          iterator = rightTuples.iterator();
-          shouldGetLeftTuple = false;
-        } else {
-          // this left tuple doesn't have a match on the right, and output a tuple with the nulls padded rightTuple
-          Tuple nullPaddedTuple = TupleUtil.createNullPaddedTuple(rightNumCols);
-          frameTuple.set(leftTuple, nullPaddedTuple);
-          projector.eval(frameTuple, outTuple);
-          // we simulate we found a match, which is exactly the null padded one
-          shouldGetLeftTuple = true;
-          return outTuple;
-        }
-      }
-
-      // getting a next right tuple on in-memory hash table.
-      rightTuple = iterator.next();
-      if (!iterator.hasNext()) { // no more right tuples for this hash key
-        shouldGetLeftTuple = true;
-      }
-
-      frameTuple.set(leftTuple, rightTuple); // evaluate a join condition on both tuples
-
-      // if there is no join filter, it is always true.
-      boolean satisfiedWithFilter = joinFilter == null || joinFilter.eval(frameTuple).isTrue();
-      boolean satisfiedWithJoinCondition = joinQual.eval(frameTuple).isTrue();
-
-      // if a composited tuple satisfies with both join filter and join condition
-      if (satisfiedWithJoinCondition && satisfiedWithFilter) {
-        projector.eval(frameTuple, outTuple);
-        return outTuple;
-      } else {
-
-        // if join filter is satisfied, the left outer join (LOJ) operator should return the null padded tuple
-        // only once. Then, LOJ operator should take the next left tuple.
-        if (!satisfiedWithFilter) {
-          shouldGetLeftTuple = true;
-        }
-
-        // null padding
-        Tuple nullPaddedTuple = TupleUtil.createNullPaddedTuple(rightNumCols);
-        frameTuple.set(leftTuple, nullPaddedTuple);
-
+    while (!context.isStopped() && !finished) {
+      if (iterator != null && iterator.hasNext()) {
+        frameTuple.setRight(iterator.next());
         projector.eval(frameTuple, outTuple);
         return outTuple;
       }
-    }
-
-    return outTuple;
-  }
-
-  protected void loadRightToHashTable() throws IOException {
-    ScanExec scanExec = PhysicalPlanUtil.findExecutor(rightChild, ScanExec.class);
-    if (scanExec.canBroadcast()) {
-      /* If this table can broadcast, all tasks in a node will share the same cache */
-      TableCacheKey key = CacheHolder.BroadcastCacheHolder.getCacheKey(
-          context, scanExec.getCanonicalName(), scanExec.getFragments());
-      loadRightFromCache(key);
-    } else {
-      this.tupleSlots = buildRightToHashTable();
-    }
-
-    first = false;
-  }
-
-  protected void loadRightFromCache(TableCacheKey key) throws IOException {
-    ExecutionBlockSharedResource sharedResource = context.getSharedResource();
-    synchronized (sharedResource.getLock()) {
-      if (sharedResource.hasBroadcastCache(key)) {
-        CacheHolder<Map<Tuple, List<Tuple>>> data = sharedResource.getBroadcastCache(key);
-        this.tupleSlots = data.getData();
-        this.cachedRightTableStats = data.getTableStats();
-      } else {
-        CacheHolder.BroadcastCacheHolder holder =
-            new CacheHolder.BroadcastCacheHolder(buildRightToHashTable(), rightChild.getInputStats(), null);
-        sharedResource.addBroadcastCache(key, holder);
-        CacheHolder<Map<Tuple, List<Tuple>>> data = sharedResource.getBroadcastCache(key);
-        this.tupleSlots = data.getData();
-        this.cachedRightTableStats = data.getTableStats();
+      Tuple leftTuple = leftChild.next(); // it comes from a disk
+      if (leftTuple == null) { // if no more tuples in left tuples on disk, a join is completed.
+        finished = true;
+        return null;
       }
-    }
-  }
+      frameTuple.setLeft(leftTuple);
 
-  private Map<Tuple, List<Tuple>> buildRightToHashTable() throws IOException {
-    Tuple tuple;
-    Tuple keyTuple;
-    Map<Tuple, List<Tuple>> map = new HashMap<Tuple, List<Tuple>>(100000);
-
-    while (!context.isStopped() && (tuple = rightChild.next()) != null) {
-      keyTuple = new VTuple(joinKeyPairs.size());
-      for (int i = 0; i < rightKeyList.length; i++) {
-        keyTuple.put(i, tuple.get(rightKeyList[i]));
+      if (leftFiltered(leftTuple)) {
+        iterator = nullIterator(rightNumCols);
+        continue;
       }
 
-      List<Tuple> newValue = map.get(keyTuple);
-
-      if (newValue != null) {
-        newValue.add(tuple);
-      } else {
-        newValue = new ArrayList<Tuple>();
-        newValue.add(tuple);
-        map.put(keyTuple, newValue);
+      // getting corresponding right
+      List<Tuple> hashed = tupleSlots.get(toKey(leftTuple));
+      Iterator<Tuple> rightTuples = rightFiltered(hashed);
+      if (!rightTuples.hasNext()) {
+        // this left tuple doesn't have a match on the right, but left outer join => we should keep it anyway:
+        // output a tuple with the nulls padded rightTuple
+        iterator = nullIterator(rightNumCols);
+        continue;
       }
+      iterator = rightTuples;
     }
 
-    return map;
-  }
-
-  @Override
-  public void rescan() throws IOException {
-    super.rescan();
-
-    tupleSlots.clear();
-    first = true;
-
-    finished = false;
-    iterator = null;
-    shouldGetLeftTuple = true;
-  }
-
-
-  @Override
-  public void close() throws IOException {
-    super.close();
-    tupleSlots.clear();
-    tupleSlots = null;
-    iterator = null;
-    plan = null;
-    joinQual = null;
-    joinFilter = null;
-    projector = null;
-  }
-
-  public JoinNode getPlan() {
-    return this.plan;
-  }
-
-  @Override
-  public TableStats getInputStats() {
-    if (leftChild == null) {
-      return inputStats;
-    }
-    TableStats leftInputStats = leftChild.getInputStats();
-    inputStats.setNumBytes(0);
-    inputStats.setReadBytes(0);
-    inputStats.setNumRows(0);
-
-    if (leftInputStats != null) {
-      inputStats.setNumBytes(leftInputStats.getNumBytes());
-      inputStats.setReadBytes(leftInputStats.getReadBytes());
-      inputStats.setNumRows(leftInputStats.getNumRows());
-    }
-
-    TableStats rightInputStats = cachedRightTableStats == null ? rightChild.getInputStats() : cachedRightTableStats;
-    if (rightInputStats != null) {
-      inputStats.setNumBytes(inputStats.getNumBytes() + rightInputStats.getNumBytes());
-      inputStats.setReadBytes(inputStats.getReadBytes() + rightInputStats.getReadBytes());
-      inputStats.setNumRows(inputStats.getNumRows() + rightInputStats.getNumRows());
-    }
-
-    return inputStats;
+    return null;
   }
 }
 

http://git-wip-us.apache.org/repos/asf/tajo/blob/36a703c5/tajo-core/src/main/java/org/apache/tajo/engine/planner/physical/HashLeftSemiJoinExec.java
----------------------------------------------------------------------
diff --git a/tajo-core/src/main/java/org/apache/tajo/engine/planner/physical/HashLeftSemiJoinExec.java b/tajo-core/src/main/java/org/apache/tajo/engine/planner/physical/HashLeftSemiJoinExec.java
index 32e6d08..41e842a 100644
--- a/tajo-core/src/main/java/org/apache/tajo/engine/planner/physical/HashLeftSemiJoinExec.java
+++ b/tajo-core/src/main/java/org/apache/tajo/engine/planner/physical/HashLeftSemiJoinExec.java
@@ -50,50 +50,34 @@ public class HashLeftSemiJoinExec extends HashJoinExec {
    * @return The tuple which is firstly matched to a given join condition.
    * @throws java.io.IOException
    */
+  @Override
   public Tuple next() throws IOException {
     if (first) {
       loadRightToHashTable();
     }
 
-    Tuple rightTuple;
-    boolean notFound;
-
     while(!context.isStopped() && !finished) {
-
-      // getting new outer
-      leftTuple = leftChild.next(); // it comes from a disk
-      if (leftTuple == null) { // if no more tuples in left tuples on disk, a join is completed.
-        finished = true;
-        return null;
+      if (iterator != null && iterator.hasNext()) {
+        frameTuple.setRight(iterator.next());
+        projector.eval(frameTuple, outTuple);
+        return outTuple;
       }
-
-      // Try to find a hash bucket in in-memory hash table
-      getKeyLeftTuple(leftTuple, leftKeyTuple);
-      List<Tuple> rightTuples = tupleSlots.get(leftKeyTuple);
-      if (rightTuples != null) {
-        // if found, it gets a hash bucket from the hash table.
-        iterator = rightTuples.iterator();
-      } else {
+      // getting new outer
+      Tuple leftTuple = leftChild.next(); // it comes from a disk
+      if (leftTuple == null || leftFiltered(leftTuple)) { // finish when the left side is exhausted; skip tuples rejected by the left filter
+        finished = leftTuple == null;
         continue;
       }
 
-      // Reach here only when a hash bucket is found. Then, it checks all tuples in the found bucket.
-      // If it finds any matched tuple, it returns the tuple immediately.
-      notFound = true;
-      while (notFound && iterator.hasNext()) {
-        rightTuple = iterator.next();
-        frameTuple.set(leftTuple, rightTuple);
-        if (joinQual.eval(frameTuple).isTrue()) { // if the matched one is found
-          notFound = false;
-          projector.eval(frameTuple, outTuple);
-        }
-      }
+      frameTuple.setLeft(leftTuple);
 
-      if (!notFound) { // if there is no matched tuple
-        break;
+      // Try to find a hash bucket in in-memory hash table
+      List<Tuple> hashed = tupleSlots.get(toKey(leftTuple));
+      if (hashed != null && rightFiltered(hashed).hasNext()) {
+        // if found, it gets a hash bucket from the hash table.
+        iterator = nullIterator(0);
       }
     }
-
-    return outTuple;
+    return null;
   }
 }

http://git-wip-us.apache.org/repos/asf/tajo/blob/36a703c5/tajo-core/src/main/java/org/apache/tajo/engine/planner/physical/NLLeftOuterJoinExec.java
----------------------------------------------------------------------
diff --git a/tajo-core/src/main/java/org/apache/tajo/engine/planner/physical/NLLeftOuterJoinExec.java b/tajo-core/src/main/java/org/apache/tajo/engine/planner/physical/NLLeftOuterJoinExec.java
deleted file mode 100644
index 735623d..0000000
--- a/tajo-core/src/main/java/org/apache/tajo/engine/planner/physical/NLLeftOuterJoinExec.java
+++ /dev/null
@@ -1,101 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.tajo.engine.planner.physical;
-
-import org.apache.tajo.engine.utils.TupleUtil;
-import org.apache.tajo.plan.logical.JoinNode;
-import org.apache.tajo.storage.FrameTuple;
-import org.apache.tajo.storage.Tuple;
-import org.apache.tajo.storage.VTuple;
-import org.apache.tajo.worker.TaskAttemptContext;
-
-import java.io.IOException;
-
-public class NLLeftOuterJoinExec extends CommonJoinExec {
-  // temporal tuples and states for nested loop join
-  private boolean needNextRightTuple;
-  private FrameTuple frameTuple;
-  private Tuple leftTuple = null;
-  private Tuple rightTuple = null;
-  private Tuple outTuple = null;
-
-  private boolean foundAtLeastOneMatch;
-  private int rightNumCols;
-
-  public NLLeftOuterJoinExec(TaskAttemptContext context, JoinNode plan, PhysicalExec leftChild,
-                             PhysicalExec rightChild) {
-    super(context, plan, leftChild, rightChild);
-    // for join
-    needNextRightTuple = true;
-    frameTuple = new FrameTuple();
-    outTuple = new VTuple(outSchema.size());
-
-    foundAtLeastOneMatch = false;
-    rightNumCols = rightChild.getSchema().size();
-  }
-
-  public Tuple next() throws IOException {
-    while (!context.isStopped()) {
-      if (needNextRightTuple) {
-        leftTuple = leftChild.next();
-        if (leftTuple == null) {
-          return null;
-        }
-        needNextRightTuple = false;
-        // a new tuple from the left child has initially no matches on the right operand
-        foundAtLeastOneMatch = false;
-      }
-      rightTuple = rightChild.next();
-
-      if (rightTuple == null) {
-        // the scan of the right operand is finished with no matches found
-        if(foundAtLeastOneMatch == false){
-          //output a tuple with the nulls padded rightTuple
-          Tuple nullPaddedTuple = TupleUtil.createNullPaddedTuple(rightNumCols);
-          frameTuple.set(leftTuple, nullPaddedTuple);
-          projector.eval(frameTuple, outTuple);
-          // we simulate we found a match, which is exactly the null padded one
-          foundAtLeastOneMatch = true;
-          needNextRightTuple = true;
-          rightChild.rescan();
-          return outTuple;
-        } else {
-          needNextRightTuple = true;
-          rightChild.rescan();
-          continue;
-        }
-      }
-
-      frameTuple.set(leftTuple, rightTuple);
-      ;
-      if (joinQual.eval(frameTuple).isTrue()) {
-        projector.eval(frameTuple, outTuple);
-        foundAtLeastOneMatch = true;
-        return outTuple;
-      }
-    }
-    return null;
-  }
-
-  @Override
-  public void rescan() throws IOException {
-    super.rescan();
-    needNextRightTuple = true;
-  }
-}

http://git-wip-us.apache.org/repos/asf/tajo/blob/36a703c5/tajo-core/src/main/java/org/apache/tajo/engine/planner/physical/PhysicalExecutorVisitor.java
----------------------------------------------------------------------
diff --git a/tajo-core/src/main/java/org/apache/tajo/engine/planner/physical/PhysicalExecutorVisitor.java b/tajo-core/src/main/java/org/apache/tajo/engine/planner/physical/PhysicalExecutorVisitor.java
index 505b599..c4d90a5 100644
--- a/tajo-core/src/main/java/org/apache/tajo/engine/planner/physical/PhysicalExecutorVisitor.java
+++ b/tajo-core/src/main/java/org/apache/tajo/engine/planner/physical/PhysicalExecutorVisitor.java
@@ -79,9 +79,6 @@ public interface PhysicalExecutorVisitor<CONTEXT, RESULT> {
   RESULT visitNLJoin(CONTEXT context, NLJoinExec exec, Stack<PhysicalExec> stack)
       throws PhysicalPlanningException;
 
-  RESULT visitNLLeftOuterJoin(CONTEXT context, NLLeftOuterJoinExec exec, Stack<PhysicalExec> stack)
-      throws PhysicalPlanningException;
-
   RESULT visitProjection(CONTEXT context, ProjectionExec exec, Stack<PhysicalExec> stack)
       throws PhysicalPlanningException;
 

http://git-wip-us.apache.org/repos/asf/tajo/blob/36a703c5/tajo-core/src/main/java/org/apache/tajo/engine/planner/physical/RightOuterMergeJoinExec.java
----------------------------------------------------------------------
diff --git a/tajo-core/src/main/java/org/apache/tajo/engine/planner/physical/RightOuterMergeJoinExec.java b/tajo-core/src/main/java/org/apache/tajo/engine/planner/physical/RightOuterMergeJoinExec.java
index 7abfbe6..fd825b1 100644
--- a/tajo-core/src/main/java/org/apache/tajo/engine/planner/physical/RightOuterMergeJoinExec.java
+++ b/tajo-core/src/main/java/org/apache/tajo/engine/planner/physical/RightOuterMergeJoinExec.java
@@ -102,7 +102,6 @@ public class RightOuterMergeJoinExec extends CommonJoinExec {
    * @throws IOException
    */
   public Tuple next() throws IOException {
-    Tuple previous;
 
     while (!context.isStopped()) {
       boolean newRound = false;
@@ -121,7 +120,7 @@ public class RightOuterMergeJoinExec extends CommonJoinExec {
 
         // The finalizing stage, where remaining tuples on the only right are transformed into left-padded results
         if (end) {
-          if (initRightDone == false) {
+          if (!initRightDone) {
             // maybe the left operand was empty => the right one didn't have the chance to initialize
             rightTuple = rightChild.next();
             initRightDone = true;
@@ -160,18 +159,24 @@ public class RightOuterMergeJoinExec extends CommonJoinExec {
           }
         }
 
-        if(rightTuple == null){
+        if(rightTuple == null) {
           rightTuple = rightChild.next();
-
-          if(rightTuple != null){
-            initRightDone = true;
-          }
-          else {
+          if (rightTuple == null) {
             initRightDone = true;
             end = true;
             continue;
           }
         }
+        if (rightFiltered(rightTuple)) {
+          Tuple nullPaddedTuple = createNullPaddedTuple(leftNumCols);
+          frameTuple.set(nullPaddedTuple, rightTuple);
+          projector.eval(frameTuple, outTuple);
+
+          rightTuple = null;
+          return outTuple;
+        }
+        initRightDone = true;
+
         //////////////////////////////////////////////////////////////////////
         // END INITIALIZATION STAGE
         //////////////////////////////////////////////////////////////////////
@@ -203,10 +208,7 @@ public class RightOuterMergeJoinExec extends CommonJoinExec {
 
             // we simulate we found a match, which is exactly the null padded one
             // BEFORE RETURN, MOVE FORWARD
-            rightTuple = rightChild.next();
-            if(rightTuple == null) {
-              end = true;
-            }
+            rightTuple = null;
             return outTuple;
 
           } else if (cmp < 0) {
@@ -223,6 +225,7 @@ public class RightOuterMergeJoinExec extends CommonJoinExec {
         // END MOVE FORWARDING STAGE
         //////////////////////////////////////////////////////////////////////
 
+        Tuple previous = null;
         // once a match is found, retain all tuples with this key in tuple slots on each side
         if(!end) {
           endInPopulationStage = false;
@@ -257,6 +260,19 @@ public class RightOuterMergeJoinExec extends CommonJoinExec {
             endInPopulationStage = true;
           }
         } // if end false
+        if (previous != null && rightFiltered(previous)) {
+          Tuple nullPaddedTuple = createNullPaddedTuple(leftNumCols);
+          frameTuple.set(nullPaddedTuple, previous);
+          projector.eval(frameTuple, outTuple);
+
+          // reset tuple slots for a new round
+          leftTupleSlots.clear();
+          innerTupleSlots.clear();
+          posRightTupleSlots = -1;
+          posLeftTupleSlots = -1;
+
+          return outTuple;
+        }
       } // if newRound
 
 

http://git-wip-us.apache.org/repos/asf/tajo/blob/36a703c5/tajo-core/src/main/java/org/apache/tajo/engine/utils/CacheHolder.java
----------------------------------------------------------------------
diff --git a/tajo-core/src/main/java/org/apache/tajo/engine/utils/CacheHolder.java b/tajo-core/src/main/java/org/apache/tajo/engine/utils/CacheHolder.java
index 6a5c0bf..addca49 100644
--- a/tajo-core/src/main/java/org/apache/tajo/engine/utils/CacheHolder.java
+++ b/tajo-core/src/main/java/org/apache/tajo/engine/utils/CacheHolder.java
@@ -18,7 +18,6 @@
 
 package org.apache.tajo.engine.utils;
 
-import com.google.common.collect.Maps;
 import org.apache.tajo.catalog.proto.CatalogProtos;
 import org.apache.tajo.catalog.statistics.TableStats;
 import org.apache.tajo.storage.Tuple;
@@ -66,7 +65,7 @@ public interface CacheHolder<T> {
 
     @Override
     public Map<Tuple, List<Tuple>> getData() {
-      return Maps.newHashMap(data);
+      return data;
     }
 
     @Override

http://git-wip-us.apache.org/repos/asf/tajo/blob/36a703c5/tajo-core/src/test/java/org/apache/tajo/engine/planner/physical/TestHashSemiJoinExec.java
----------------------------------------------------------------------
diff --git a/tajo-core/src/test/java/org/apache/tajo/engine/planner/physical/TestHashSemiJoinExec.java b/tajo-core/src/test/java/org/apache/tajo/engine/planner/physical/TestHashSemiJoinExec.java
index 95debd4..7210214 100644
--- a/tajo-core/src/test/java/org/apache/tajo/engine/planner/physical/TestHashSemiJoinExec.java
+++ b/tajo-core/src/test/java/org/apache/tajo/engine/planner/physical/TestHashSemiJoinExec.java
@@ -206,10 +206,10 @@ public class TestHashSemiJoinExec {
     // expect result without duplicated tuples.
     while ((tuple = exec.next()) != null) {
       count++;
-      assertTrue(i == tuple.get(0).asInt4());
-      assertTrue(i == tuple.get(1).asInt4());
-      assertTrue(("dept_" + i).equals(tuple.get(2).asChars()));
-      assertTrue(10 + i == tuple.get(3).asInt4());
+      assertEquals(i, tuple.get(0).asInt4());
+      assertEquals(i, tuple.get(1).asInt4());
+      assertEquals("dept_" + i, tuple.get(2).asChars());
+      assertEquals(10 + i, tuple.get(3).asInt4());
 
       i += 2;
     }


[09/10] tajo git commit: TAJO-1603: Refactor StorageManager. (hyunsik)

Posted by ji...@apache.org.
TAJO-1603: Refactor StorageManager. (hyunsik)

Closes #570


Project: http://git-wip-us.apache.org/repos/asf/tajo/repo
Commit: http://git-wip-us.apache.org/repos/asf/tajo/commit/5491f0e7
Tree: http://git-wip-us.apache.org/repos/asf/tajo/tree/5491f0e7
Diff: http://git-wip-us.apache.org/repos/asf/tajo/diff/5491f0e7

Branch: refs/heads/index_support
Commit: 5491f0e7507c7efa1b2306d4c1f1d25240e482a9
Parents: 36a703c
Author: Hyunsik Choi <hy...@apache.org>
Authored: Fri May 15 01:48:09 2015 -0700
Committer: Hyunsik Choi <hy...@apache.org>
Committed: Fri May 15 01:48:09 2015 -0700

----------------------------------------------------------------------
 CHANGES                                         |   2 +
 .../engine/planner/PhysicalPlannerImpl.java     |   5 +-
 .../planner/physical/BSTIndexScanExec.java      |   2 +-
 .../planner/physical/ColPartitionStoreExec.java |   7 +-
 .../physical/RangeShuffleFileWriteExec.java     |   3 +-
 .../engine/planner/physical/SeqScanExec.java    |   2 +-
 .../engine/planner/physical/StoreTableExec.java |   9 +-
 .../org/apache/tajo/master/GlobalEngine.java    |   4 +-
 .../java/org/apache/tajo/master/TajoMaster.java |   3 +-
 .../apache/tajo/master/exec/DDLExecutor.java    |   5 +-
 .../exec/NonForwardQueryResultFileScanner.java  |   7 +-
 .../apache/tajo/master/exec/QueryExecutor.java  |   4 +-
 .../java/org/apache/tajo/querymaster/Query.java |   7 +-
 .../tajo/querymaster/QueryMasterTask.java       |   4 +-
 .../apache/tajo/querymaster/Repartitioner.java  |  24 +-
 .../java/org/apache/tajo/querymaster/Stage.java |   6 +-
 .../java/org/apache/tajo/worker/TajoWorker.java |   4 +-
 .../main/java/org/apache/tajo/worker/Task.java  |   3 +-
 .../org/apache/tajo/BackendTestingUtil.java     |   3 +-
 .../planner/global/TestBroadcastJoinPlan.java   |   3 +-
 .../planner/physical/TestBNLJoinExec.java       |   5 +-
 .../planner/physical/TestBSTIndexExec.java      |   3 +-
 .../planner/physical/TestExternalSortExec.java  |   3 +-
 .../physical/TestFullOuterHashJoinExec.java     |   9 +-
 .../physical/TestFullOuterMergeJoinExec.java    |  10 +-
 .../planner/physical/TestHashAntiJoinExec.java  |   5 +-
 .../planner/physical/TestHashJoinExec.java      |   5 +-
 .../planner/physical/TestHashSemiJoinExec.java  |   5 +-
 .../physical/TestLeftOuterHashJoinExec.java     |   9 +-
 .../planner/physical/TestMergeJoinExec.java     |   5 +-
 .../engine/planner/physical/TestNLJoinExec.java |   5 +-
 .../planner/physical/TestPhysicalPlanner.java   |  11 +-
 .../physical/TestProgressExternalSortExec.java  |   3 +-
 .../physical/TestRightOuterHashJoinExec.java    |   7 +-
 .../physical/TestRightOuterMergeJoinExec.java   |  11 +-
 .../engine/planner/physical/TestSortExec.java   |   5 +-
 .../tajo/engine/query/TestHBaseTable.java       |  12 +-
 .../tajo/engine/query/TestJoinBroadcast.java    |   2 +-
 .../org/apache/tajo/jdbc/TestResultSet.java     |   2 +-
 .../tajo/master/TestExecutionBlockCursor.java   |   4 +-
 .../org/apache/tajo/storage/TestRowFile.java    |   5 +-
 .../org/apache/tajo/storage/MergeScanner.java   |   2 +-
 .../org/apache/tajo/storage/StorageManager.java | 645 +------------------
 .../org/apache/tajo/storage/TableSpace.java     |  74 +++
 .../apache/tajo/storage/TableSpaceManager.java  | 254 ++++++++
 .../tajo/storage/hbase/HBasePutAppender.java    |   4 +-
 .../apache/tajo/storage/hbase/HBaseScanner.java |   7 +-
 .../tajo/storage/hbase/HBaseStorageManager.java |  40 +-
 .../storage/hbase/TestHBaseStorageManager.java  |   5 +-
 .../org/apache/tajo/storage/FileAppender.java   |   2 +-
 .../apache/tajo/storage/FileStorageManager.java | 377 ++++++++++-
 .../storage/HashShuffleAppenderManager.java     |   4 +-
 .../tajo/storage/TestCompressionStorages.java   |   5 +-
 .../tajo/storage/TestDelimitedTextFile.java     |   9 +-
 .../tajo/storage/TestFileStorageManager.java    |  11 +-
 .../apache/tajo/storage/TestFileSystems.java    |   3 +-
 .../org/apache/tajo/storage/TestLineReader.java |   9 +-
 .../apache/tajo/storage/TestMergeScanner.java   |   7 +-
 .../org/apache/tajo/storage/TestStorages.java   |  48 +-
 .../apache/tajo/storage/index/TestBSTIndex.java |  53 +-
 .../index/TestSingleCSVFileBSTIndex.java        |   5 +-
 .../apache/tajo/storage/json/TestJsonSerDe.java |   8 +-
 62 files changed, 934 insertions(+), 871 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/tajo/blob/5491f0e7/CHANGES
----------------------------------------------------------------------
diff --git a/CHANGES b/CHANGES
index 44ae4b4..21f5e5a 100644
--- a/CHANGES
+++ b/CHANGES
@@ -24,6 +24,8 @@ Release 0.11.0 - unreleased
 
   IMPROVEMENT
 
+    TAJO-1603: Refactor StorageManager. (hyunsik)
+
     TAJO-1542: Refactoring of HashJoinExecs. (Contributed Navis, Committed by 
     hyunsik)
 

http://git-wip-us.apache.org/repos/asf/tajo/blob/5491f0e7/tajo-core/src/main/java/org/apache/tajo/engine/planner/PhysicalPlannerImpl.java
----------------------------------------------------------------------
diff --git a/tajo-core/src/main/java/org/apache/tajo/engine/planner/PhysicalPlannerImpl.java b/tajo-core/src/main/java/org/apache/tajo/engine/planner/PhysicalPlannerImpl.java
index 978dde8..ff42d5f 100644
--- a/tajo-core/src/main/java/org/apache/tajo/engine/planner/PhysicalPlannerImpl.java
+++ b/tajo-core/src/main/java/org/apache/tajo/engine/planner/PhysicalPlannerImpl.java
@@ -30,7 +30,6 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.Path;
 import org.apache.tajo.SessionVars;
 import org.apache.tajo.catalog.Column;
-import org.apache.tajo.catalog.Schema;
 import org.apache.tajo.catalog.SortSpec;
 import org.apache.tajo.catalog.proto.CatalogProtos;
 import org.apache.tajo.catalog.proto.CatalogProtos.SortSpecProto;
@@ -925,7 +924,7 @@ public class PhysicalPlannerImpl implements PhysicalPlanner {
         if (broadcastFlag) {
           PartitionedTableScanNode partitionedTableScanNode = (PartitionedTableScanNode) scanNode;
           List<Fragment> fileFragments = TUtil.newList();
-          FileStorageManager fileStorageManager = (FileStorageManager)StorageManager.getFileStorageManager(ctx.getConf());
+          FileStorageManager fileStorageManager = (FileStorageManager) TableSpaceManager.getFileStorageManager(ctx.getConf());
           for (Path path : partitionedTableScanNode.getInputPaths()) {
             fileFragments.addAll(TUtil.newList(fileStorageManager.split(scanNode.getCanonicalName(), path)));
           }
@@ -1189,7 +1188,7 @@ public class PhysicalPlannerImpl implements PhysicalPlanner {
         FragmentConvertor.convert(ctx.getConf(), fragmentProtos);
 
     String indexName = IndexUtil.getIndexNameOfFrag(fragments.get(0), annotation.getSortKeys());
-    FileStorageManager sm = (FileStorageManager)StorageManager.getFileStorageManager(ctx.getConf());
+    FileStorageManager sm = (FileStorageManager) TableSpaceManager.getFileStorageManager(ctx.getConf());
     Path indexPath = new Path(sm.getTablePath(annotation.getTableName()), "index");
 
     TupleComparator comp = new BaseTupleComparator(annotation.getKeySchema(),

http://git-wip-us.apache.org/repos/asf/tajo/blob/5491f0e7/tajo-core/src/main/java/org/apache/tajo/engine/planner/physical/BSTIndexScanExec.java
----------------------------------------------------------------------
diff --git a/tajo-core/src/main/java/org/apache/tajo/engine/planner/physical/BSTIndexScanExec.java b/tajo-core/src/main/java/org/apache/tajo/engine/planner/physical/BSTIndexScanExec.java
index 806d34c..4612d45 100644
--- a/tajo-core/src/main/java/org/apache/tajo/engine/planner/physical/BSTIndexScanExec.java
+++ b/tajo-core/src/main/java/org/apache/tajo/engine/planner/physical/BSTIndexScanExec.java
@@ -56,7 +56,7 @@ public class BSTIndexScanExec extends PhysicalExec {
     this.qual = scanNode.getQual();
     this.datum = datum;
 
-    this.fileScanner = StorageManager.getSeekableScanner(context.getConf(),
+    this.fileScanner = TableSpaceManager.getSeekableScanner(context.getConf(),
         scanNode.getTableDesc().getMeta(), scanNode.getInSchema(), fragment, outSchema);
     this.fileScanner.init();
     this.projector = new Projector(context, inSchema, outSchema, scanNode.getTargets());

http://git-wip-us.apache.org/repos/asf/tajo/blob/5491f0e7/tajo-core/src/main/java/org/apache/tajo/engine/planner/physical/ColPartitionStoreExec.java
----------------------------------------------------------------------
diff --git a/tajo-core/src/main/java/org/apache/tajo/engine/planner/physical/ColPartitionStoreExec.java b/tajo-core/src/main/java/org/apache/tajo/engine/planner/physical/ColPartitionStoreExec.java
index 4481569..8d53a6f 100644
--- a/tajo-core/src/main/java/org/apache/tajo/engine/planner/physical/ColPartitionStoreExec.java
+++ b/tajo-core/src/main/java/org/apache/tajo/engine/planner/physical/ColPartitionStoreExec.java
@@ -34,10 +34,7 @@ import org.apache.tajo.plan.logical.CreateTableNode;
 import org.apache.tajo.plan.logical.InsertNode;
 import org.apache.tajo.plan.logical.NodeType;
 import org.apache.tajo.plan.logical.StoreTableNode;
-import org.apache.tajo.storage.Appender;
-import org.apache.tajo.storage.FileStorageManager;
-import org.apache.tajo.storage.StorageManager;
-import org.apache.tajo.storage.StorageUtil;
+import org.apache.tajo.storage.*;
 import org.apache.tajo.unit.StorageUnit;
 import org.apache.tajo.worker.TaskAttemptContext;
 
@@ -168,7 +165,7 @@ public abstract class ColPartitionStoreExec extends UnaryPhysicalExec {
       actualFilePath = new Path(lastFileName + "_" + suffixId);
     }
 
-    appender = ((FileStorageManager)StorageManager.getFileStorageManager(context.getConf()))
+    appender = ((FileStorageManager) TableSpaceManager.getFileStorageManager(context.getConf()))
         .getAppender(meta, outSchema, actualFilePath);
 
     appender.enableStats();

http://git-wip-us.apache.org/repos/asf/tajo/blob/5491f0e7/tajo-core/src/main/java/org/apache/tajo/engine/planner/physical/RangeShuffleFileWriteExec.java
----------------------------------------------------------------------
diff --git a/tajo-core/src/main/java/org/apache/tajo/engine/planner/physical/RangeShuffleFileWriteExec.java b/tajo-core/src/main/java/org/apache/tajo/engine/planner/physical/RangeShuffleFileWriteExec.java
index cb1fa05..6fd2ce4 100644
--- a/tajo-core/src/main/java/org/apache/tajo/engine/planner/physical/RangeShuffleFileWriteExec.java
+++ b/tajo-core/src/main/java/org/apache/tajo/engine/planner/physical/RangeShuffleFileWriteExec.java
@@ -25,7 +25,6 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.RawLocalFileSystem;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.tajo.catalog.*;
-import org.apache.tajo.catalog.proto.CatalogProtos;
 import org.apache.tajo.conf.TajoConf;
 import org.apache.tajo.plan.util.PlannerUtil;
 import org.apache.tajo.storage.*;
@@ -78,7 +77,7 @@ public class RangeShuffleFileWriteExec extends UnaryPhysicalExec {
         context.getDataChannel().getStoreType() : "RAW");
     FileSystem fs = new RawLocalFileSystem();
     fs.mkdirs(storeTablePath);
-    this.appender = (FileAppender) ((FileStorageManager)StorageManager.getFileStorageManager(context.getConf()))
+    this.appender = (FileAppender) ((FileStorageManager) TableSpaceManager.getFileStorageManager(context.getConf()))
         .getAppender(meta, outSchema, new Path(storeTablePath, "output"));
     this.appender.enableStats();
     this.appender.init();

http://git-wip-us.apache.org/repos/asf/tajo/blob/5491f0e7/tajo-core/src/main/java/org/apache/tajo/engine/planner/physical/SeqScanExec.java
----------------------------------------------------------------------
diff --git a/tajo-core/src/main/java/org/apache/tajo/engine/planner/physical/SeqScanExec.java b/tajo-core/src/main/java/org/apache/tajo/engine/planner/physical/SeqScanExec.java
index 3d95068..2225dae 100644
--- a/tajo-core/src/main/java/org/apache/tajo/engine/planner/physical/SeqScanExec.java
+++ b/tajo-core/src/main/java/org/apache/tajo/engine/planner/physical/SeqScanExec.java
@@ -202,7 +202,7 @@ public class SeqScanExec extends ScanExec {
             FragmentConvertor.convert(context.getConf(), fragments), projected
         );
       } else {
-        StorageManager storageManager = StorageManager.getStorageManager(
+        StorageManager storageManager = TableSpaceManager.getStorageManager(
             context.getConf(), plan.getTableDesc().getMeta().getStoreType());
         this.scanner = storageManager.getScanner(meta,
             plan.getPhysicalSchema(), fragments[0], projected);

http://git-wip-us.apache.org/repos/asf/tajo/blob/5491f0e7/tajo-core/src/main/java/org/apache/tajo/engine/planner/physical/StoreTableExec.java
----------------------------------------------------------------------
diff --git a/tajo-core/src/main/java/org/apache/tajo/engine/planner/physical/StoreTableExec.java b/tajo-core/src/main/java/org/apache/tajo/engine/planner/physical/StoreTableExec.java
index 5622699..b0263d7 100644
--- a/tajo-core/src/main/java/org/apache/tajo/engine/planner/physical/StoreTableExec.java
+++ b/tajo-core/src/main/java/org/apache/tajo/engine/planner/physical/StoreTableExec.java
@@ -30,10 +30,7 @@ import org.apache.tajo.catalog.statistics.TableStats;
 import org.apache.tajo.plan.logical.InsertNode;
 import org.apache.tajo.plan.logical.PersistentStoreNode;
 import org.apache.tajo.plan.util.PlannerUtil;
-import org.apache.tajo.storage.Appender;
-import org.apache.tajo.storage.FileStorageManager;
-import org.apache.tajo.storage.StorageManager;
-import org.apache.tajo.storage.Tuple;
+import org.apache.tajo.storage.*;
 import org.apache.tajo.unit.StorageUnit;
 import org.apache.tajo.worker.TaskAttemptContext;
 
@@ -93,7 +90,7 @@ public class StoreTableExec extends UnaryPhysicalExec {
         lastFileName = new Path(lastFileName + "_" + suffixId);
       }
 
-      appender = ((FileStorageManager)StorageManager.getFileStorageManager(context.getConf()))
+      appender = ((FileStorageManager) TableSpaceManager.getFileStorageManager(context.getConf()))
           .getAppender(meta, appenderSchema, lastFileName);
 
       if (suffixId > 0) {
@@ -101,7 +98,7 @@ public class StoreTableExec extends UnaryPhysicalExec {
             "The remain output will be written into " + lastFileName.toString());
       }
     } else {
-      appender = StorageManager.getStorageManager(context.getConf(), meta.getStoreType()).getAppender(
+      appender = TableSpaceManager.getStorageManager(context.getConf(), meta.getStoreType()).getAppender(
           context.getQueryContext(),
           context.getTaskId(), meta, appenderSchema, context.getQueryContext().getStagingDir());
     }

http://git-wip-us.apache.org/repos/asf/tajo/blob/5491f0e7/tajo-core/src/main/java/org/apache/tajo/master/GlobalEngine.java
----------------------------------------------------------------------
diff --git a/tajo-core/src/main/java/org/apache/tajo/master/GlobalEngine.java b/tajo-core/src/main/java/org/apache/tajo/master/GlobalEngine.java
index adbc8a4..aeb4166 100644
--- a/tajo-core/src/main/java/org/apache/tajo/master/GlobalEngine.java
+++ b/tajo-core/src/main/java/org/apache/tajo/master/GlobalEngine.java
@@ -35,7 +35,6 @@ import org.apache.tajo.algebra.JsonHelper;
 import org.apache.tajo.catalog.CatalogService;
 import org.apache.tajo.catalog.Schema;
 import org.apache.tajo.catalog.TableDesc;
-import org.apache.tajo.catalog.proto.CatalogProtos.StoreType;
 import org.apache.tajo.conf.TajoConf;
 import org.apache.tajo.engine.parser.SQLAnalyzer;
 import org.apache.tajo.engine.parser.SQLSyntaxError;
@@ -55,6 +54,7 @@ import org.apache.tajo.plan.verifier.PreLogicalPlanVerifier;
 import org.apache.tajo.plan.verifier.VerificationState;
 import org.apache.tajo.plan.verifier.VerifyException;
 import org.apache.tajo.storage.StorageManager;
+import org.apache.tajo.storage.TableSpaceManager;
 import org.apache.tajo.util.CommonTestingUtil;
 
 import java.io.IOException;
@@ -302,7 +302,7 @@ public class GlobalEngine extends AbstractService {
           InsertNode iNode = rootNode.getChild();
           Schema outSchema = iNode.getChild().getOutSchema();
 
-          StorageManager.getStorageManager(queryContext.getConf(), storeType)
+          TableSpaceManager.getStorageManager(queryContext.getConf(), storeType)
               .verifyInsertTableSchema(tableDesc, outSchema);
         } catch (Throwable t) {
           state.addVerification(t.getMessage());

http://git-wip-us.apache.org/repos/asf/tajo/blob/5491f0e7/tajo-core/src/main/java/org/apache/tajo/master/TajoMaster.java
----------------------------------------------------------------------
diff --git a/tajo-core/src/main/java/org/apache/tajo/master/TajoMaster.java b/tajo-core/src/main/java/org/apache/tajo/master/TajoMaster.java
index fb2a160..abf070e 100644
--- a/tajo-core/src/main/java/org/apache/tajo/master/TajoMaster.java
+++ b/tajo-core/src/main/java/org/apache/tajo/master/TajoMaster.java
@@ -54,6 +54,7 @@ import org.apache.tajo.service.ServiceTracker;
 import org.apache.tajo.service.ServiceTrackerFactory;
 import org.apache.tajo.session.SessionManager;
 import org.apache.tajo.storage.StorageManager;
+import org.apache.tajo.storage.TableSpaceManager;
 import org.apache.tajo.util.*;
 import org.apache.tajo.util.history.HistoryReader;
 import org.apache.tajo.util.history.HistoryWriter;
@@ -182,7 +183,7 @@ public class TajoMaster extends CompositeService {
       // check the system directory and create if they are not created.
       checkAndInitializeSystemDirectories();
       diagnoseTajoMaster();
-      this.storeManager = StorageManager.getFileStorageManager(systemConf);
+      this.storeManager = TableSpaceManager.getFileStorageManager(systemConf);
 
       catalogServer = new CatalogServer(loadFunctions());
       addIfService(catalogServer);

http://git-wip-us.apache.org/repos/asf/tajo/blob/5491f0e7/tajo-core/src/main/java/org/apache/tajo/master/exec/DDLExecutor.java
----------------------------------------------------------------------
diff --git a/tajo-core/src/main/java/org/apache/tajo/master/exec/DDLExecutor.java b/tajo-core/src/main/java/org/apache/tajo/master/exec/DDLExecutor.java
index c84b0c7..888194d 100644
--- a/tajo-core/src/main/java/org/apache/tajo/master/exec/DDLExecutor.java
+++ b/tajo-core/src/main/java/org/apache/tajo/master/exec/DDLExecutor.java
@@ -38,6 +38,7 @@ import org.apache.tajo.plan.logical.*;
 import org.apache.tajo.plan.util.PlannerUtil;
 import org.apache.tajo.storage.StorageManager;
 import org.apache.tajo.storage.StorageUtil;
+import org.apache.tajo.storage.TableSpaceManager;
 
 import java.io.IOException;
 import java.util.ArrayList;
@@ -242,7 +243,7 @@ public class DDLExecutor {
       desc.setPartitionMethod(partitionDesc);
     }
 
-    StorageManager.getStorageManager(queryContext.getConf(), storeType).createTable(desc, ifNotExists);
+    TableSpaceManager.getStorageManager(queryContext.getConf(), storeType).createTable(desc, ifNotExists);
 
     if (catalog.createTable(desc)) {
       LOG.info("Table " + desc.getName() + " is created (" + desc.getStats().getNumBytes() + ")");
@@ -289,7 +290,7 @@ public class DDLExecutor {
 
     if (purge) {
       try {
-        StorageManager.getStorageManager(queryContext.getConf(),
+        TableSpaceManager.getStorageManager(queryContext.getConf(),
             tableDesc.getMeta().getStoreType()).purgeTable(tableDesc);
       } catch (IOException e) {
         throw new InternalError(e.getMessage());

http://git-wip-us.apache.org/repos/asf/tajo/blob/5491f0e7/tajo-core/src/main/java/org/apache/tajo/master/exec/NonForwardQueryResultFileScanner.java
----------------------------------------------------------------------
diff --git a/tajo-core/src/main/java/org/apache/tajo/master/exec/NonForwardQueryResultFileScanner.java b/tajo-core/src/main/java/org/apache/tajo/master/exec/NonForwardQueryResultFileScanner.java
index 9c0bd48..d2022b5 100644
--- a/tajo-core/src/main/java/org/apache/tajo/master/exec/NonForwardQueryResultFileScanner.java
+++ b/tajo-core/src/main/java/org/apache/tajo/master/exec/NonForwardQueryResultFileScanner.java
@@ -33,11 +33,8 @@ import org.apache.tajo.plan.expr.EvalTreeUtil;
 import org.apache.tajo.plan.logical.ScanNode;
 import org.apache.tajo.engine.planner.physical.SeqScanExec;
 import org.apache.tajo.engine.query.QueryContext;
-import org.apache.tajo.storage.FileStorageManager;
-import org.apache.tajo.storage.RowStoreUtil;
+import org.apache.tajo.storage.*;
 import org.apache.tajo.storage.RowStoreUtil.RowStoreEncoder;
-import org.apache.tajo.storage.StorageManager;
-import org.apache.tajo.storage.Tuple;
 import org.apache.tajo.storage.fragment.Fragment;
 import org.apache.tajo.storage.fragment.FragmentConvertor;
 import org.apache.tajo.util.StringUtils;
@@ -104,7 +101,7 @@ public class NonForwardQueryResultFileScanner implements NonForwardQueryResultSc
   }
 
   private void initSeqScanExec() throws IOException {
-    StorageManager storageManager = StorageManager.getStorageManager(tajoConf, tableDesc.getMeta().getStoreType());
+    StorageManager storageManager = TableSpaceManager.getStorageManager(tajoConf, tableDesc.getMeta().getStoreType());
     List<Fragment> fragments = null;
     setPartition(storageManager);
     fragments = storageManager.getNonForwardSplit(tableDesc, currentFragmentIndex, MAX_FRAGMENT_NUM_PER_SCAN);

http://git-wip-us.apache.org/repos/asf/tajo/blob/5491f0e7/tajo-core/src/main/java/org/apache/tajo/master/exec/QueryExecutor.java
----------------------------------------------------------------------
diff --git a/tajo-core/src/main/java/org/apache/tajo/master/exec/QueryExecutor.java b/tajo-core/src/main/java/org/apache/tajo/master/exec/QueryExecutor.java
index 481bdbe..695c38e 100644
--- a/tajo-core/src/main/java/org/apache/tajo/master/exec/QueryExecutor.java
+++ b/tajo-core/src/main/java/org/apache/tajo/master/exec/QueryExecutor.java
@@ -438,7 +438,7 @@ public class QueryExecutor {
 
     String storeType = PlannerUtil.getStoreType(plan);
     if (storeType != null) {
-      StorageManager sm = StorageManager.getStorageManager(context.getConf(), storeType);
+      StorageManager sm = TableSpaceManager.getStorageManager(context.getConf(), storeType);
       StorageProperty storageProperty = sm.getStorageProperty();
       if (!storageProperty.isSupportsInsertInto()) {
         throw new VerifyException("Inserting into non-file storage is not supported.");
@@ -476,7 +476,7 @@ public class QueryExecutor {
 
     String storeType = PlannerUtil.getStoreType(plan);
     if (storeType != null) {
-      StorageManager sm = StorageManager.getStorageManager(planner.getConf(), storeType);
+      StorageManager sm = TableSpaceManager.getStorageManager(planner.getConf(), storeType);
       StorageProperty storageProperty = sm.getStorageProperty();
       if (storageProperty.isSortedInsert()) {
         String tableName = PlannerUtil.getStoreTableName(plan);

http://git-wip-us.apache.org/repos/asf/tajo/blob/5491f0e7/tajo-core/src/main/java/org/apache/tajo/querymaster/Query.java
----------------------------------------------------------------------
diff --git a/tajo-core/src/main/java/org/apache/tajo/querymaster/Query.java b/tajo-core/src/main/java/org/apache/tajo/querymaster/Query.java
index a2e434b..362dfa6 100644
--- a/tajo-core/src/main/java/org/apache/tajo/querymaster/Query.java
+++ b/tajo-core/src/main/java/org/apache/tajo/querymaster/Query.java
@@ -36,7 +36,6 @@ import org.apache.tajo.catalog.proto.CatalogProtos.UpdateTableStatsProto;
 import org.apache.tajo.catalog.CatalogService;
 import org.apache.tajo.catalog.TableDesc;
 import org.apache.tajo.catalog.TableMeta;
-import org.apache.tajo.catalog.proto.CatalogProtos.StoreType;
 import org.apache.tajo.catalog.statistics.TableStats;
 import org.apache.tajo.conf.TajoConf;
 import org.apache.tajo.engine.planner.global.ExecutionBlock;
@@ -46,8 +45,8 @@ import org.apache.tajo.plan.logical.*;
 import org.apache.tajo.engine.query.QueryContext;
 import org.apache.tajo.master.event.*;
 import org.apache.tajo.plan.util.PlannerUtil;
-import org.apache.tajo.storage.StorageManager;
 import org.apache.tajo.storage.StorageConstants;
+import org.apache.tajo.storage.TableSpaceManager;
 import org.apache.tajo.util.TUtil;
 import org.apache.tajo.util.history.QueryHistory;
 import org.apache.tajo.util.history.StageHistory;
@@ -424,7 +423,7 @@ public class Query implements EventHandler<QueryEvent> {
           if (storeType != null) {
             LogicalRootNode rootNode = lastStage.getMasterPlan().getLogicalPlan().getRootBlock().getRoot();
             try {
-              StorageManager.getStorageManager(query.systemConf, storeType).rollbackOutputCommit(rootNode.getChild());
+              TableSpaceManager.getStorageManager(query.systemConf, storeType).rollbackOutputCommit(rootNode.getChild());
             } catch (IOException e) {
               LOG.warn(query.getId() + ", failed processing cleanup storage when query failed:" + e.getMessage(), e);
             }
@@ -445,7 +444,7 @@ public class Query implements EventHandler<QueryEvent> {
         CatalogService catalog = lastStage.getContext().getQueryMasterContext().getWorkerContext().getCatalog();
         TableDesc tableDesc =  PlannerUtil.getTableDesc(catalog, rootNode.getChild());
 
-        Path finalOutputDir = StorageManager.getStorageManager(query.systemConf, storeType)
+        Path finalOutputDir = TableSpaceManager.getStorageManager(query.systemConf, storeType)
             .commitOutputData(query.context.getQueryContext(),
                 lastStage.getId(), lastStage.getMasterPlan().getLogicalPlan(), lastStage.getSchema(), tableDesc);
 

http://git-wip-us.apache.org/repos/asf/tajo/blob/5491f0e7/tajo-core/src/main/java/org/apache/tajo/querymaster/QueryMasterTask.java
----------------------------------------------------------------------
diff --git a/tajo-core/src/main/java/org/apache/tajo/querymaster/QueryMasterTask.java b/tajo-core/src/main/java/org/apache/tajo/querymaster/QueryMasterTask.java
index 8d20141..d77001c 100644
--- a/tajo-core/src/main/java/org/apache/tajo/querymaster/QueryMasterTask.java
+++ b/tajo-core/src/main/java/org/apache/tajo/querymaster/QueryMasterTask.java
@@ -36,7 +36,6 @@ import org.apache.tajo.algebra.Expr;
 import org.apache.tajo.algebra.JsonHelper;
 import org.apache.tajo.catalog.CatalogService;
 import org.apache.tajo.catalog.TableDesc;
-import org.apache.tajo.catalog.proto.CatalogProtos.StoreType;
 import org.apache.tajo.conf.TajoConf;
 import org.apache.tajo.engine.planner.global.MasterPlan;
 import org.apache.tajo.engine.query.QueryContext;
@@ -59,6 +58,7 @@ import org.apache.tajo.session.Session;
 import org.apache.tajo.storage.StorageManager;
 import org.apache.tajo.storage.StorageProperty;
 import org.apache.tajo.storage.StorageUtil;
+import org.apache.tajo.storage.TableSpaceManager;
 import org.apache.tajo.util.metrics.TajoMetrics;
 import org.apache.tajo.util.metrics.reporter.MetricsConsoleReporter;
 import org.apache.tajo.worker.AbstractResourceAllocator;
@@ -324,7 +324,7 @@ public class QueryMasterTask extends CompositeService {
 
       String storeType = PlannerUtil.getStoreType(plan);
       if (storeType != null) {
-        sm = StorageManager.getStorageManager(systemConf, storeType);
+        sm = TableSpaceManager.getStorageManager(systemConf, storeType);
         StorageProperty storageProperty = sm.getStorageProperty();
         if (storageProperty.isSortedInsert()) {
           String tableName = PlannerUtil.getStoreTableName(plan);

http://git-wip-us.apache.org/repos/asf/tajo/blob/5491f0e7/tajo-core/src/main/java/org/apache/tajo/querymaster/Repartitioner.java
----------------------------------------------------------------------
diff --git a/tajo-core/src/main/java/org/apache/tajo/querymaster/Repartitioner.java b/tajo-core/src/main/java/org/apache/tajo/querymaster/Repartitioner.java
index 7160a2d..b43b506 100644
--- a/tajo-core/src/main/java/org/apache/tajo/querymaster/Repartitioner.java
+++ b/tajo-core/src/main/java/org/apache/tajo/querymaster/Repartitioner.java
@@ -27,7 +27,6 @@ import org.apache.tajo.ExecutionBlockId;
 import org.apache.tajo.SessionVars;
 import org.apache.tajo.algebra.JoinType;
 import org.apache.tajo.catalog.*;
-import org.apache.tajo.catalog.proto.CatalogProtos.StoreType;
 import org.apache.tajo.catalog.statistics.StatisticsUtil;
 import org.apache.tajo.catalog.statistics.TableStats;
 import org.apache.tajo.conf.TajoConf.ConfVars;
@@ -48,10 +47,7 @@ import org.apache.tajo.plan.logical.SortNode.SortPurpose;
 import org.apache.tajo.plan.util.PlannerUtil;
 import org.apache.tajo.plan.PlanningException;
 import org.apache.tajo.plan.logical.*;
-import org.apache.tajo.storage.FileStorageManager;
-import org.apache.tajo.storage.StorageManager;
-import org.apache.tajo.storage.RowStoreUtil;
-import org.apache.tajo.storage.TupleRange;
+import org.apache.tajo.storage.*;
 import org.apache.tajo.storage.fragment.FileFragment;
 import org.apache.tajo.storage.fragment.Fragment;
 import org.apache.tajo.util.Pair;
@@ -96,7 +92,7 @@ public class Repartitioner {
       TableDesc tableDesc = masterContext.getTableDescMap().get(scans[i].getCanonicalName());
       if (tableDesc == null) { // if it is a real table stored on storage
         FileStorageManager storageManager =
-            (FileStorageManager)StorageManager.getFileStorageManager(stage.getContext().getConf());
+            (FileStorageManager) TableSpaceManager.getFileStorageManager(stage.getContext().getConf());
 
         tablePath = storageManager.getTablePath(scans[i].getTableName());
         if (execBlock.getUnionScanMap() != null && !execBlock.getUnionScanMap().isEmpty()) {
@@ -117,7 +113,7 @@ public class Repartitioner {
         }
 
         StorageManager storageManager =
-            StorageManager.getStorageManager(stage.getContext().getConf(), tableDesc.getMeta().getStoreType());
+            TableSpaceManager.getStorageManager(stage.getContext().getConf(), tableDesc.getMeta().getStoreType());
 
         // if table has no data, storageManager will return empty FileFragment.
         // So, we need to handle FileFragment by its size.
@@ -412,7 +408,7 @@ public class Repartitioner {
         TableDesc tableDesc = masterContext.getTableDescMap().get(eachScan.getCanonicalName());
         if (eachScan.getType() == NodeType.PARTITIONS_SCAN) {
           FileStorageManager storageManager =
-              (FileStorageManager)StorageManager.getFileStorageManager(stage.getContext().getConf());
+              (FileStorageManager) TableSpaceManager.getFileStorageManager(stage.getContext().getConf());
 
           PartitionedTableScanNode partitionScan = (PartitionedTableScanNode)eachScan;
           partitionScanPaths = partitionScan.getInputPaths();
@@ -420,7 +416,7 @@ public class Repartitioner {
           getFragmentsFromPartitionedTable(storageManager, eachScan, tableDesc);
           partitionScan.setInputPaths(partitionScanPaths);
         } else {
-          StorageManager storageManager = StorageManager.getStorageManager(stage.getContext().getConf(),
+          StorageManager storageManager = TableSpaceManager.getStorageManager(stage.getContext().getConf(),
               tableDesc.getMeta().getStoreType());
           Collection<Fragment> scanFragments = storageManager.getSplits(eachScan.getCanonicalName(),
               tableDesc, eachScan);
@@ -540,11 +536,11 @@ public class Repartitioner {
         partitionScanPaths = partitionScan.getInputPaths();
         // set null to inputPaths in getFragmentsFromPartitionedTable()
         FileStorageManager storageManager =
-            (FileStorageManager)StorageManager.getFileStorageManager(stage.getContext().getConf());
+            (FileStorageManager) TableSpaceManager.getFileStorageManager(stage.getContext().getConf());
         scanFragments = getFragmentsFromPartitionedTable(storageManager, scan, desc);
       } else {
         StorageManager storageManager =
-            StorageManager.getStorageManager(stage.getContext().getConf(), desc.getMeta().getStoreType());
+            TableSpaceManager.getStorageManager(stage.getContext().getConf(), desc.getMeta().getStoreType());
 
         scanFragments = storageManager.getSplits(scan.getCanonicalName(), desc, scan);
       }
@@ -649,7 +645,7 @@ public class Repartitioner {
     ExecutionBlock execBlock = stage.getBlock();
     ScanNode scan = execBlock.getScanNodes()[0];
     Path tablePath;
-    tablePath = ((FileStorageManager)StorageManager.getFileStorageManager(stage.getContext().getConf()))
+    tablePath = ((FileStorageManager) TableSpaceManager.getFileStorageManager(stage.getContext().getConf()))
         .getTablePath(scan.getTableName());
 
     ExecutionBlock sampleChildBlock = masterPlan.getChild(stage.getId(), 0);
@@ -678,7 +674,7 @@ public class Repartitioner {
         throw new IOException("Can't get table meta data from catalog: " +
             PlannerUtil.getStoreTableName(masterPlan.getLogicalPlan()));
       }
-      ranges = StorageManager.getStorageManager(stage.getContext().getConf(), storeType)
+      ranges = TableSpaceManager.getStorageManager(stage.getContext().getConf(), storeType)
           .getInsertSortRanges(stage.getContext().getQueryContext(), tableDesc,
               sortNode.getInSchema(), sortSpecs,
               mergedRange);
@@ -815,7 +811,7 @@ public class Repartitioner {
     ExecutionBlock execBlock = stage.getBlock();
     ScanNode scan = execBlock.getScanNodes()[0];
     Path tablePath;
-    tablePath = ((FileStorageManager)StorageManager.getFileStorageManager(stage.getContext().getConf()))
+    tablePath = ((FileStorageManager) TableSpaceManager.getFileStorageManager(stage.getContext().getConf()))
         .getTablePath(scan.getTableName());
 
     Fragment frag = new FileFragment(scan.getCanonicalName(), tablePath, 0, 0, new String[]{UNKNOWN_HOST});

http://git-wip-us.apache.org/repos/asf/tajo/blob/5491f0e7/tajo-core/src/main/java/org/apache/tajo/querymaster/Stage.java
----------------------------------------------------------------------
diff --git a/tajo-core/src/main/java/org/apache/tajo/querymaster/Stage.java b/tajo-core/src/main/java/org/apache/tajo/querymaster/Stage.java
index f03ee2f..548ea26 100644
--- a/tajo-core/src/main/java/org/apache/tajo/querymaster/Stage.java
+++ b/tajo-core/src/main/java/org/apache/tajo/querymaster/Stage.java
@@ -34,7 +34,6 @@ import org.apache.tajo.catalog.CatalogUtil;
 import org.apache.tajo.catalog.Schema;
 import org.apache.tajo.catalog.TableDesc;
 import org.apache.tajo.catalog.TableMeta;
-import org.apache.tajo.catalog.proto.CatalogProtos.StoreType;
 import org.apache.tajo.catalog.statistics.ColumnStats;
 import org.apache.tajo.catalog.statistics.StatisticsUtil;
 import org.apache.tajo.catalog.statistics.TableStats;
@@ -62,6 +61,7 @@ import org.apache.tajo.plan.util.PlannerUtil;
 import org.apache.tajo.querymaster.Task.IntermediateEntry;
 import org.apache.tajo.storage.FileStorageManager;
 import org.apache.tajo.storage.StorageManager;
+import org.apache.tajo.storage.TableSpaceManager;
 import org.apache.tajo.storage.fragment.Fragment;
 import org.apache.tajo.unit.StorageUnit;
 import org.apache.tajo.util.KeyValueSet;
@@ -1090,11 +1090,11 @@ public class Stage implements EventHandler<StageEvent> {
       if (scan.getType() == NodeType.PARTITIONS_SCAN) {
         // After calling this method, partition paths are removed from the physical plan.
         FileStorageManager storageManager =
-            (FileStorageManager)StorageManager.getFileStorageManager(stage.getContext().getConf());
+            (FileStorageManager) TableSpaceManager.getFileStorageManager(stage.getContext().getConf());
         fragments = Repartitioner.getFragmentsFromPartitionedTable(storageManager, scan, table);
       } else {
         StorageManager storageManager =
-            StorageManager.getStorageManager(stage.getContext().getConf(), meta.getStoreType());
+            TableSpaceManager.getStorageManager(stage.getContext().getConf(), meta.getStoreType());
         fragments = storageManager.getSplits(scan.getCanonicalName(), table, scan);
       }
 

http://git-wip-us.apache.org/repos/asf/tajo/blob/5491f0e7/tajo-core/src/main/java/org/apache/tajo/worker/TajoWorker.java
----------------------------------------------------------------------
diff --git a/tajo-core/src/main/java/org/apache/tajo/worker/TajoWorker.java b/tajo-core/src/main/java/org/apache/tajo/worker/TajoWorker.java
index 0cecd73..4f07ca6 100644
--- a/tajo-core/src/main/java/org/apache/tajo/worker/TajoWorker.java
+++ b/tajo-core/src/main/java/org/apache/tajo/worker/TajoWorker.java
@@ -56,7 +56,7 @@ import org.apache.tajo.rule.EvaluationFailedException;
 import org.apache.tajo.rule.SelfDiagnosisRuleEngine;
 import org.apache.tajo.rule.SelfDiagnosisRuleSession;
 import org.apache.tajo.storage.HashShuffleAppenderManager;
-import org.apache.tajo.storage.StorageManager;
+import org.apache.tajo.storage.TableSpaceManager;
 import org.apache.tajo.util.*;
 import org.apache.tajo.util.history.HistoryReader;
 import org.apache.tajo.util.history.HistoryWriter;
@@ -370,7 +370,7 @@ public class TajoWorker extends CompositeService {
     }
 
     try {
-      StorageManager.close();
+      TableSpaceManager.shutdown();
     } catch (IOException ie) {
       LOG.error(ie.getMessage(), ie);
     }

http://git-wip-us.apache.org/repos/asf/tajo/blob/5491f0e7/tajo-core/src/main/java/org/apache/tajo/worker/Task.java
----------------------------------------------------------------------
diff --git a/tajo-core/src/main/java/org/apache/tajo/worker/Task.java b/tajo-core/src/main/java/org/apache/tajo/worker/Task.java
index 53ed73e..01f56b8 100644
--- a/tajo-core/src/main/java/org/apache/tajo/worker/Task.java
+++ b/tajo-core/src/main/java/org/apache/tajo/worker/Task.java
@@ -52,7 +52,6 @@ import org.apache.tajo.plan.serder.LogicalNodeDeserializer;
 import org.apache.tajo.plan.util.PlannerUtil;
 import org.apache.tajo.pullserver.TajoPullServerService;
 import org.apache.tajo.pullserver.retriever.FileChunk;
-import org.apache.tajo.rpc.NettyClientBase;
 import org.apache.tajo.rpc.NullCallback;
 import org.apache.tajo.storage.*;
 import org.apache.tajo.storage.fragment.FileFragment;
@@ -162,7 +161,7 @@ public class Task {
         this.sortComp = new BaseTupleComparator(finalSchema, sortNode.getSortKeys());
       }
     } else {
-      Path outFilePath = ((FileStorageManager)StorageManager.getFileStorageManager(systemConf))
+      Path outFilePath = ((FileStorageManager) TableSpaceManager.getFileStorageManager(systemConf))
           .getAppenderFilePath(taskId, queryContext.getStagingDir());
       LOG.info("Output File Path: " + outFilePath);
       context.setOutputPath(outFilePath);

http://git-wip-us.apache.org/repos/asf/tajo/blob/5491f0e7/tajo-core/src/test/java/org/apache/tajo/BackendTestingUtil.java
----------------------------------------------------------------------
diff --git a/tajo-core/src/test/java/org/apache/tajo/BackendTestingUtil.java b/tajo-core/src/test/java/org/apache/tajo/BackendTestingUtil.java
index 0774eff..a1c6011 100644
--- a/tajo-core/src/test/java/org/apache/tajo/BackendTestingUtil.java
+++ b/tajo-core/src/test/java/org/apache/tajo/BackendTestingUtil.java
@@ -26,7 +26,6 @@ import org.apache.hadoop.fs.Path;
 import org.apache.tajo.catalog.CatalogUtil;
 import org.apache.tajo.catalog.Schema;
 import org.apache.tajo.catalog.TableMeta;
-import org.apache.tajo.catalog.proto.CatalogProtos.StoreType;
 import org.apache.tajo.common.TajoDataTypes.Type;
 import org.apache.tajo.conf.TajoConf;
 import org.apache.tajo.datum.DatumFactory;
@@ -47,7 +46,7 @@ public class BackendTestingUtil {
 
   public static void writeTmpTable(TajoConf conf, Path tablePath)
       throws IOException {
-    FileStorageManager sm = (FileStorageManager)StorageManager.getFileStorageManager(conf);
+    FileStorageManager sm = (FileStorageManager) TableSpaceManager.getFileStorageManager(conf);
     Appender appender;
 
     Path filePath = new Path(tablePath, "table.csv");

http://git-wip-us.apache.org/repos/asf/tajo/blob/5491f0e7/tajo-core/src/test/java/org/apache/tajo/engine/planner/global/TestBroadcastJoinPlan.java
----------------------------------------------------------------------
diff --git a/tajo-core/src/test/java/org/apache/tajo/engine/planner/global/TestBroadcastJoinPlan.java b/tajo-core/src/test/java/org/apache/tajo/engine/planner/global/TestBroadcastJoinPlan.java
index 9ff360f..44a22ae 100644
--- a/tajo-core/src/test/java/org/apache/tajo/engine/planner/global/TestBroadcastJoinPlan.java
+++ b/tajo-core/src/test/java/org/apache/tajo/engine/planner/global/TestBroadcastJoinPlan.java
@@ -27,7 +27,6 @@ import org.apache.tajo.QueryIdFactory;
 import org.apache.tajo.TajoTestingCluster;
 import org.apache.tajo.algebra.Expr;
 import org.apache.tajo.catalog.*;
-import org.apache.tajo.catalog.proto.CatalogProtos;
 import org.apache.tajo.catalog.statistics.TableStats;
 import org.apache.tajo.common.TajoDataTypes;
 import org.apache.tajo.conf.TajoConf;
@@ -137,7 +136,7 @@ public class TestBroadcastJoinPlan {
         contentsData += j;
       }
     }
-    Appender appender = ((FileStorageManager)StorageManager.getFileStorageManager(conf))
+    Appender appender = ((FileStorageManager) TableSpaceManager.getFileStorageManager(conf))
         .getAppender(tableMeta, schema, dataPath);
     appender.init();
     Tuple tuple = new VTuple(schema.size());

http://git-wip-us.apache.org/repos/asf/tajo/blob/5491f0e7/tajo-core/src/test/java/org/apache/tajo/engine/planner/physical/TestBNLJoinExec.java
----------------------------------------------------------------------
diff --git a/tajo-core/src/test/java/org/apache/tajo/engine/planner/physical/TestBNLJoinExec.java b/tajo-core/src/test/java/org/apache/tajo/engine/planner/physical/TestBNLJoinExec.java
index a50d813..104e09b 100644
--- a/tajo-core/src/test/java/org/apache/tajo/engine/planner/physical/TestBNLJoinExec.java
+++ b/tajo-core/src/test/java/org/apache/tajo/engine/planner/physical/TestBNLJoinExec.java
@@ -23,7 +23,6 @@ import org.apache.tajo.LocalTajoTestingUtility;
 import org.apache.tajo.TajoTestingCluster;
 import org.apache.tajo.algebra.Expr;
 import org.apache.tajo.catalog.*;
-import org.apache.tajo.catalog.proto.CatalogProtos.StoreType;
 import org.apache.tajo.common.TajoDataTypes.Type;
 import org.apache.tajo.conf.TajoConf;
 import org.apache.tajo.datum.Datum;
@@ -87,7 +86,7 @@ public class TestBNLJoinExec {
 
     TableMeta employeeMeta = CatalogUtil.newTableMeta("CSV");
     Path employeePath = new Path(testDir, "employee.csv");
-    Appender appender = ((FileStorageManager)StorageManager.getFileStorageManager(conf))
+    Appender appender = ((FileStorageManager) TableSpaceManager.getFileStorageManager(conf))
         .getAppender(employeeMeta, schema, employeePath);
     appender.init();
     Tuple tuple = new VTuple(schema.size());
@@ -109,7 +108,7 @@ public class TestBNLJoinExec {
     peopleSchema.addColumn("age", Type.INT4);
     TableMeta peopleMeta = CatalogUtil.newTableMeta("CSV");
     Path peoplePath = new Path(testDir, "people.csv");
-    appender = ((FileStorageManager)StorageManager.getFileStorageManager(conf))
+    appender = ((FileStorageManager) TableSpaceManager.getFileStorageManager(conf))
         .getAppender(peopleMeta, peopleSchema, peoplePath);
     appender.init();
     tuple = new VTuple(peopleSchema.size());

http://git-wip-us.apache.org/repos/asf/tajo/blob/5491f0e7/tajo-core/src/test/java/org/apache/tajo/engine/planner/physical/TestBSTIndexExec.java
----------------------------------------------------------------------
diff --git a/tajo-core/src/test/java/org/apache/tajo/engine/planner/physical/TestBSTIndexExec.java b/tajo-core/src/test/java/org/apache/tajo/engine/planner/physical/TestBSTIndexExec.java
index ac8860f..a8597e9 100644
--- a/tajo-core/src/test/java/org/apache/tajo/engine/planner/physical/TestBSTIndexExec.java
+++ b/tajo-core/src/test/java/org/apache/tajo/engine/planner/physical/TestBSTIndexExec.java
@@ -26,7 +26,6 @@ import org.apache.tajo.TajoConstants;
 import org.apache.tajo.TajoTestingCluster;
 import org.apache.tajo.algebra.Expr;
 import org.apache.tajo.catalog.*;
-import org.apache.tajo.catalog.proto.CatalogProtos.StoreType;
 import org.apache.tajo.common.TajoDataTypes.Type;
 import org.apache.tajo.conf.TajoConf;
 import org.apache.tajo.datum.Datum;
@@ -91,7 +90,7 @@ public class TestBSTIndexExec {
     Path workDir = CommonTestingUtil.getTestDir();
     catalog.createTablespace(DEFAULT_TABLESPACE_NAME, workDir.toUri().toString());
     catalog.createDatabase(TajoConstants.DEFAULT_DATABASE_NAME, DEFAULT_TABLESPACE_NAME);
-    sm = (FileStorageManager)StorageManager.getFileStorageManager(conf);
+    sm = (FileStorageManager) TableSpaceManager.getFileStorageManager(conf);
 
     idxPath = new Path(workDir, "test.idx");
 

http://git-wip-us.apache.org/repos/asf/tajo/blob/5491f0e7/tajo-core/src/test/java/org/apache/tajo/engine/planner/physical/TestExternalSortExec.java
----------------------------------------------------------------------
diff --git a/tajo-core/src/test/java/org/apache/tajo/engine/planner/physical/TestExternalSortExec.java b/tajo-core/src/test/java/org/apache/tajo/engine/planner/physical/TestExternalSortExec.java
index 0b93c76..221a622 100644
--- a/tajo-core/src/test/java/org/apache/tajo/engine/planner/physical/TestExternalSortExec.java
+++ b/tajo-core/src/test/java/org/apache/tajo/engine/planner/physical/TestExternalSortExec.java
@@ -24,7 +24,6 @@ import org.apache.tajo.TajoConstants;
 import org.apache.tajo.TajoTestingCluster;
 import org.apache.tajo.algebra.Expr;
 import org.apache.tajo.catalog.*;
-import org.apache.tajo.catalog.proto.CatalogProtos.StoreType;
 import org.apache.tajo.common.TajoDataTypes.Type;
 import org.apache.tajo.conf.TajoConf;
 import org.apache.tajo.datum.Datum;
@@ -83,7 +82,7 @@ public class TestExternalSortExec {
 
     TableMeta employeeMeta = CatalogUtil.newTableMeta("CSV");
     Path employeePath = new Path(testDir, "employee.csv");
-    Appender appender = ((FileStorageManager)StorageManager.getFileStorageManager(conf))
+    Appender appender = ((FileStorageManager) TableSpaceManager.getFileStorageManager(conf))
         .getAppender(employeeMeta, schema, employeePath);
     appender.enableStats();
     appender.init();

http://git-wip-us.apache.org/repos/asf/tajo/blob/5491f0e7/tajo-core/src/test/java/org/apache/tajo/engine/planner/physical/TestFullOuterHashJoinExec.java
----------------------------------------------------------------------
diff --git a/tajo-core/src/test/java/org/apache/tajo/engine/planner/physical/TestFullOuterHashJoinExec.java b/tajo-core/src/test/java/org/apache/tajo/engine/planner/physical/TestFullOuterHashJoinExec.java
index e5ba9b3..501557f 100644
--- a/tajo-core/src/test/java/org/apache/tajo/engine/planner/physical/TestFullOuterHashJoinExec.java
+++ b/tajo-core/src/test/java/org/apache/tajo/engine/planner/physical/TestFullOuterHashJoinExec.java
@@ -23,7 +23,6 @@ import org.apache.tajo.LocalTajoTestingUtility;
 import org.apache.tajo.TajoTestingCluster;
 import org.apache.tajo.algebra.Expr;
 import org.apache.tajo.catalog.*;
-import org.apache.tajo.catalog.proto.CatalogProtos.StoreType;
 import org.apache.tajo.common.TajoDataTypes.Type;
 import org.apache.tajo.conf.TajoConf;
 import org.apache.tajo.datum.Datum;
@@ -105,7 +104,7 @@ public class TestFullOuterHashJoinExec {
 
     TableMeta dep3Meta = CatalogUtil.newTableMeta("CSV");
     Path dep3Path = new Path(testDir, "dep3.csv");
-    Appender appender1 = ((FileStorageManager)StorageManager.getFileStorageManager(conf))
+    Appender appender1 = ((FileStorageManager) TableSpaceManager.getFileStorageManager(conf))
         .getAppender(dep3Meta, dep3Schema, dep3Path);
     appender1.init();
     Tuple tuple = new VTuple(dep3Schema.size());
@@ -135,7 +134,7 @@ public class TestFullOuterHashJoinExec {
 
     TableMeta job3Meta = CatalogUtil.newTableMeta("CSV");
     Path job3Path = new Path(testDir, "job3.csv");
-    Appender appender2 = ((FileStorageManager)StorageManager.getFileStorageManager(conf))
+    Appender appender2 = ((FileStorageManager) TableSpaceManager.getFileStorageManager(conf))
         .getAppender(job3Meta, job3Schema, job3Path);
     appender2.init();
     Tuple tuple2 = new VTuple(job3Schema.size());
@@ -175,7 +174,7 @@ public class TestFullOuterHashJoinExec {
 
     TableMeta emp3Meta = CatalogUtil.newTableMeta("CSV");
     Path emp3Path = new Path(testDir, "emp3.csv");
-    Appender appender3 = ((FileStorageManager)StorageManager.getFileStorageManager(conf))
+    Appender appender3 = ((FileStorageManager) TableSpaceManager.getFileStorageManager(conf))
         .getAppender(emp3Meta, emp3Schema, emp3Path);
     appender3.init();
     Tuple tuple3 = new VTuple(emp3Schema.size());
@@ -228,7 +227,7 @@ public class TestFullOuterHashJoinExec {
 
     TableMeta phone3Meta = CatalogUtil.newTableMeta("CSV");
     Path phone3Path = new Path(testDir, "phone3.csv");
-    Appender appender5 = ((FileStorageManager)StorageManager.getFileStorageManager(conf))
+    Appender appender5 = ((FileStorageManager) TableSpaceManager.getFileStorageManager(conf))
         .getAppender(phone3Meta, phone3Schema, phone3Path);
     appender5.init();
 

http://git-wip-us.apache.org/repos/asf/tajo/blob/5491f0e7/tajo-core/src/test/java/org/apache/tajo/engine/planner/physical/TestFullOuterMergeJoinExec.java
----------------------------------------------------------------------
diff --git a/tajo-core/src/test/java/org/apache/tajo/engine/planner/physical/TestFullOuterMergeJoinExec.java b/tajo-core/src/test/java/org/apache/tajo/engine/planner/physical/TestFullOuterMergeJoinExec.java
index 1bbaa66..4b8e1fa 100644
--- a/tajo-core/src/test/java/org/apache/tajo/engine/planner/physical/TestFullOuterMergeJoinExec.java
+++ b/tajo-core/src/test/java/org/apache/tajo/engine/planner/physical/TestFullOuterMergeJoinExec.java
@@ -109,7 +109,7 @@ public class TestFullOuterMergeJoinExec {
 
     TableMeta dep3Meta = CatalogUtil.newTableMeta("CSV");
     Path dep3Path = new Path(testDir, "dep3.csv");
-    Appender appender1 = ((FileStorageManager)StorageManager.getFileStorageManager(conf))
+    Appender appender1 = ((FileStorageManager) TableSpaceManager.getFileStorageManager(conf))
         .getAppender(dep3Meta, dep3Schema, dep3Path);
     appender1.init();
     Tuple tuple = new VTuple(dep3Schema.size());
@@ -148,7 +148,7 @@ public class TestFullOuterMergeJoinExec {
 
     TableMeta dep4Meta = CatalogUtil.newTableMeta("CSV");
     Path dep4Path = new Path(testDir, "dep4.csv");
-    Appender appender4 = ((FileStorageManager)StorageManager.getFileStorageManager(conf))
+    Appender appender4 = ((FileStorageManager) TableSpaceManager.getFileStorageManager(conf))
         .getAppender(dep4Meta, dep4Schema, dep4Path);
     appender4.init();
     Tuple tuple4 = new VTuple(dep4Schema.size());
@@ -180,7 +180,7 @@ public class TestFullOuterMergeJoinExec {
 
     TableMeta job3Meta = CatalogUtil.newTableMeta("CSV");
     Path job3Path = new Path(testDir, "job3.csv");
-    Appender appender2 = ((FileStorageManager)StorageManager.getFileStorageManager(conf))
+    Appender appender2 = ((FileStorageManager) TableSpaceManager.getFileStorageManager(conf))
         .getAppender(job3Meta, job3Schema, job3Path);
     appender2.init();
     Tuple tuple2 = new VTuple(job3Schema.size());
@@ -220,7 +220,7 @@ public class TestFullOuterMergeJoinExec {
 
     TableMeta emp3Meta = CatalogUtil.newTableMeta("CSV");
     Path emp3Path = new Path(testDir, "emp3.csv");
-    Appender appender3 = ((FileStorageManager)StorageManager.getFileStorageManager(conf))
+    Appender appender3 = ((FileStorageManager) TableSpaceManager.getFileStorageManager(conf))
         .getAppender(emp3Meta, emp3Schema, emp3Path);
     appender3.init();
     Tuple tuple3 = new VTuple(emp3Schema.size());
@@ -273,7 +273,7 @@ public class TestFullOuterMergeJoinExec {
 
     TableMeta phone3Meta = CatalogUtil.newTableMeta("CSV");
     Path phone3Path = new Path(testDir, "phone3.csv");
-    Appender appender5 = ((FileStorageManager)StorageManager.getFileStorageManager(conf))
+    Appender appender5 = ((FileStorageManager) TableSpaceManager.getFileStorageManager(conf))
         .getAppender(phone3Meta, phone3Schema, phone3Path);
     appender5.init();
     appender5.flush();

http://git-wip-us.apache.org/repos/asf/tajo/blob/5491f0e7/tajo-core/src/test/java/org/apache/tajo/engine/planner/physical/TestHashAntiJoinExec.java
----------------------------------------------------------------------
diff --git a/tajo-core/src/test/java/org/apache/tajo/engine/planner/physical/TestHashAntiJoinExec.java b/tajo-core/src/test/java/org/apache/tajo/engine/planner/physical/TestHashAntiJoinExec.java
index 30b9b19..624aae0 100644
--- a/tajo-core/src/test/java/org/apache/tajo/engine/planner/physical/TestHashAntiJoinExec.java
+++ b/tajo-core/src/test/java/org/apache/tajo/engine/planner/physical/TestHashAntiJoinExec.java
@@ -23,7 +23,6 @@ import org.apache.tajo.LocalTajoTestingUtility;
 import org.apache.tajo.TajoTestingCluster;
 import org.apache.tajo.algebra.Expr;
 import org.apache.tajo.catalog.*;
-import org.apache.tajo.catalog.proto.CatalogProtos.StoreType;
 import org.apache.tajo.common.TajoDataTypes.Type;
 import org.apache.tajo.conf.TajoConf;
 import org.apache.tajo.datum.Datum;
@@ -84,7 +83,7 @@ public class TestHashAntiJoinExec {
 
     TableMeta employeeMeta = CatalogUtil.newTableMeta("CSV");
     Path employeePath = new Path(testDir, "employee.csv");
-    Appender appender = ((FileStorageManager)StorageManager.getFileStorageManager(conf))
+    Appender appender = ((FileStorageManager) TableSpaceManager.getFileStorageManager(conf))
         .getAppender(employeeMeta, employeeSchema, employeePath);
     appender.init();
     Tuple tuple = new VTuple(employeeSchema.size());
@@ -110,7 +109,7 @@ public class TestHashAntiJoinExec {
     peopleSchema.addColumn("age", Type.INT4);
     TableMeta peopleMeta = CatalogUtil.newTableMeta("CSV");
     Path peoplePath = new Path(testDir, "people.csv");
-    appender = ((FileStorageManager)StorageManager.getFileStorageManager(conf))
+    appender = ((FileStorageManager) TableSpaceManager.getFileStorageManager(conf))
         .getAppender(peopleMeta, peopleSchema, peoplePath);
     appender.init();
     tuple = new VTuple(peopleSchema.size());

http://git-wip-us.apache.org/repos/asf/tajo/blob/5491f0e7/tajo-core/src/test/java/org/apache/tajo/engine/planner/physical/TestHashJoinExec.java
----------------------------------------------------------------------
diff --git a/tajo-core/src/test/java/org/apache/tajo/engine/planner/physical/TestHashJoinExec.java b/tajo-core/src/test/java/org/apache/tajo/engine/planner/physical/TestHashJoinExec.java
index cd42d80..36d02b1 100644
--- a/tajo-core/src/test/java/org/apache/tajo/engine/planner/physical/TestHashJoinExec.java
+++ b/tajo-core/src/test/java/org/apache/tajo/engine/planner/physical/TestHashJoinExec.java
@@ -25,7 +25,6 @@ import org.apache.tajo.TajoConstants;
 import org.apache.tajo.TajoTestingCluster;
 import org.apache.tajo.algebra.Expr;
 import org.apache.tajo.catalog.*;
-import org.apache.tajo.catalog.proto.CatalogProtos.StoreType;
 import org.apache.tajo.common.TajoDataTypes.Type;
 import org.apache.tajo.conf.TajoConf;
 import org.apache.tajo.datum.Datum;
@@ -86,7 +85,7 @@ public class TestHashJoinExec {
 
     TableMeta employeeMeta = CatalogUtil.newTableMeta("CSV");
     Path employeePath = new Path(testDir, "employee.csv");
-    Appender appender = ((FileStorageManager)StorageManager.getFileStorageManager(conf))
+    Appender appender = ((FileStorageManager) TableSpaceManager.getFileStorageManager(conf))
         .getAppender(employeeMeta, employeeSchema, employeePath);
     appender.init();
     Tuple tuple = new VTuple(employeeSchema.size());
@@ -109,7 +108,7 @@ public class TestHashJoinExec {
     peopleSchema.addColumn("age", Type.INT4);
     TableMeta peopleMeta = CatalogUtil.newTableMeta("CSV");
     Path peoplePath = new Path(testDir, "people.csv");
-    appender = ((FileStorageManager)StorageManager.getFileStorageManager(conf))
+    appender = ((FileStorageManager) TableSpaceManager.getFileStorageManager(conf))
         .getAppender(peopleMeta, peopleSchema, peoplePath);
     appender.init();
     tuple = new VTuple(peopleSchema.size());

http://git-wip-us.apache.org/repos/asf/tajo/blob/5491f0e7/tajo-core/src/test/java/org/apache/tajo/engine/planner/physical/TestHashSemiJoinExec.java
----------------------------------------------------------------------
diff --git a/tajo-core/src/test/java/org/apache/tajo/engine/planner/physical/TestHashSemiJoinExec.java b/tajo-core/src/test/java/org/apache/tajo/engine/planner/physical/TestHashSemiJoinExec.java
index 7210214..1a0151a 100644
--- a/tajo-core/src/test/java/org/apache/tajo/engine/planner/physical/TestHashSemiJoinExec.java
+++ b/tajo-core/src/test/java/org/apache/tajo/engine/planner/physical/TestHashSemiJoinExec.java
@@ -23,7 +23,6 @@ import org.apache.tajo.LocalTajoTestingUtility;
 import org.apache.tajo.TajoTestingCluster;
 import org.apache.tajo.algebra.Expr;
 import org.apache.tajo.catalog.*;
-import org.apache.tajo.catalog.proto.CatalogProtos.StoreType;
 import org.apache.tajo.common.TajoDataTypes.Type;
 import org.apache.tajo.conf.TajoConf;
 import org.apache.tajo.datum.Datum;
@@ -85,7 +84,7 @@ public class TestHashSemiJoinExec {
 
     TableMeta employeeMeta = CatalogUtil.newTableMeta("CSV");
     Path employeePath = new Path(testDir, "employee.csv");
-    Appender appender = ((FileStorageManager)StorageManager.getFileStorageManager(conf))
+    Appender appender = ((FileStorageManager) TableSpaceManager.getFileStorageManager(conf))
         .getAppender(employeeMeta, employeeSchema, employeePath);
     appender.init();
     Tuple tuple = new VTuple(employeeSchema.size());
@@ -111,7 +110,7 @@ public class TestHashSemiJoinExec {
     peopleSchema.addColumn("age", Type.INT4);
     TableMeta peopleMeta = CatalogUtil.newTableMeta("CSV");
     Path peoplePath = new Path(testDir, "people.csv");
-    appender = ((FileStorageManager)StorageManager.getFileStorageManager(conf))
+    appender = ((FileStorageManager) TableSpaceManager.getFileStorageManager(conf))
         .getAppender(peopleMeta, peopleSchema, peoplePath);
     appender.init();
     tuple = new VTuple(peopleSchema.size());

http://git-wip-us.apache.org/repos/asf/tajo/blob/5491f0e7/tajo-core/src/test/java/org/apache/tajo/engine/planner/physical/TestLeftOuterHashJoinExec.java
----------------------------------------------------------------------
diff --git a/tajo-core/src/test/java/org/apache/tajo/engine/planner/physical/TestLeftOuterHashJoinExec.java b/tajo-core/src/test/java/org/apache/tajo/engine/planner/physical/TestLeftOuterHashJoinExec.java
index 9afc51f..127d309 100644
--- a/tajo-core/src/test/java/org/apache/tajo/engine/planner/physical/TestLeftOuterHashJoinExec.java
+++ b/tajo-core/src/test/java/org/apache/tajo/engine/planner/physical/TestLeftOuterHashJoinExec.java
@@ -23,7 +23,6 @@ import org.apache.tajo.LocalTajoTestingUtility;
 import org.apache.tajo.TajoTestingCluster;
 import org.apache.tajo.algebra.Expr;
 import org.apache.tajo.catalog.*;
-import org.apache.tajo.catalog.proto.CatalogProtos.StoreType;
 import org.apache.tajo.common.TajoDataTypes.Type;
 import org.apache.tajo.conf.TajoConf;
 import org.apache.tajo.datum.Datum;
@@ -106,7 +105,7 @@ public class TestLeftOuterHashJoinExec {
 
     TableMeta dep3Meta = CatalogUtil.newTableMeta("CSV");
     Path dep3Path = new Path(testDir, "dep3.csv");
-    Appender appender1 = ((FileStorageManager)StorageManager.getFileStorageManager(conf))
+    Appender appender1 = ((FileStorageManager) TableSpaceManager.getFileStorageManager(conf))
         .getAppender(dep3Meta, dep3Schema, dep3Path);
     appender1.init();
     Tuple tuple = new VTuple(dep3Schema.size());
@@ -136,7 +135,7 @@ public class TestLeftOuterHashJoinExec {
 
     TableMeta job3Meta = CatalogUtil.newTableMeta("CSV");
     Path job3Path = new Path(testDir, "job3.csv");
-    Appender appender2 = ((FileStorageManager)StorageManager.getFileStorageManager(conf))
+    Appender appender2 = ((FileStorageManager) TableSpaceManager.getFileStorageManager(conf))
         .getAppender(job3Meta, job3Schema, job3Path);
     appender2.init();
     Tuple tuple2 = new VTuple(job3Schema.size());
@@ -176,7 +175,7 @@ public class TestLeftOuterHashJoinExec {
 
     TableMeta emp3Meta = CatalogUtil.newTableMeta("CSV");
     Path emp3Path = new Path(testDir, "emp3.csv");
-    Appender appender3 = ((FileStorageManager)StorageManager.getFileStorageManager(conf))
+    Appender appender3 = ((FileStorageManager) TableSpaceManager.getFileStorageManager(conf))
         .getAppender(emp3Meta, emp3Schema, emp3Path);
     appender3.init();
     Tuple tuple3 = new VTuple(emp3Schema.size());
@@ -229,7 +228,7 @@ public class TestLeftOuterHashJoinExec {
 
     TableMeta phone3Meta = CatalogUtil.newTableMeta("CSV");
     Path phone3Path = new Path(testDir, "phone3.csv");
-    Appender appender5 = ((FileStorageManager)StorageManager.getFileStorageManager(conf))
+    Appender appender5 = ((FileStorageManager) TableSpaceManager.getFileStorageManager(conf))
         .getAppender(phone3Meta, phone3Schema, phone3Path);
     appender5.init();
     

http://git-wip-us.apache.org/repos/asf/tajo/blob/5491f0e7/tajo-core/src/test/java/org/apache/tajo/engine/planner/physical/TestMergeJoinExec.java
----------------------------------------------------------------------
diff --git a/tajo-core/src/test/java/org/apache/tajo/engine/planner/physical/TestMergeJoinExec.java b/tajo-core/src/test/java/org/apache/tajo/engine/planner/physical/TestMergeJoinExec.java
index 6e7b5dc..c83d436 100644
--- a/tajo-core/src/test/java/org/apache/tajo/engine/planner/physical/TestMergeJoinExec.java
+++ b/tajo-core/src/test/java/org/apache/tajo/engine/planner/physical/TestMergeJoinExec.java
@@ -24,7 +24,6 @@ import org.apache.tajo.TajoConstants;
 import org.apache.tajo.TajoTestingCluster;
 import org.apache.tajo.algebra.Expr;
 import org.apache.tajo.catalog.*;
-import org.apache.tajo.catalog.proto.CatalogProtos.StoreType;
 import org.apache.tajo.common.TajoDataTypes.Type;
 import org.apache.tajo.conf.TajoConf;
 import org.apache.tajo.datum.Datum;
@@ -86,7 +85,7 @@ public class TestMergeJoinExec {
 
     TableMeta employeeMeta = CatalogUtil.newTableMeta("CSV");
     Path employeePath = new Path(testDir, "employee.csv");
-    Appender appender = ((FileStorageManager)StorageManager.getFileStorageManager(conf))
+    Appender appender = ((FileStorageManager) TableSpaceManager.getFileStorageManager(conf))
         .getAppender(employeeMeta, employeeSchema, employeePath);
     appender.init();
     Tuple tuple = new VTuple(employeeSchema.size());
@@ -115,7 +114,7 @@ public class TestMergeJoinExec {
     peopleSchema.addColumn("age", Type.INT4);
     TableMeta peopleMeta = CatalogUtil.newTableMeta("CSV");
     Path peoplePath = new Path(testDir, "people.csv");
-    appender = ((FileStorageManager)StorageManager.getFileStorageManager(conf))
+    appender = ((FileStorageManager) TableSpaceManager.getFileStorageManager(conf))
         .getAppender(peopleMeta, peopleSchema, peoplePath);
     appender.init();
     tuple = new VTuple(peopleSchema.size());

http://git-wip-us.apache.org/repos/asf/tajo/blob/5491f0e7/tajo-core/src/test/java/org/apache/tajo/engine/planner/physical/TestNLJoinExec.java
----------------------------------------------------------------------
diff --git a/tajo-core/src/test/java/org/apache/tajo/engine/planner/physical/TestNLJoinExec.java b/tajo-core/src/test/java/org/apache/tajo/engine/planner/physical/TestNLJoinExec.java
index 96c28ee..ff9fdae 100644
--- a/tajo-core/src/test/java/org/apache/tajo/engine/planner/physical/TestNLJoinExec.java
+++ b/tajo-core/src/test/java/org/apache/tajo/engine/planner/physical/TestNLJoinExec.java
@@ -23,7 +23,6 @@ import org.apache.tajo.LocalTajoTestingUtility;
 import org.apache.tajo.TajoTestingCluster;
 import org.apache.tajo.algebra.Expr;
 import org.apache.tajo.catalog.*;
-import org.apache.tajo.catalog.proto.CatalogProtos.StoreType;
 import org.apache.tajo.common.TajoDataTypes.Type;
 import org.apache.tajo.conf.TajoConf;
 import org.apache.tajo.datum.Datum;
@@ -84,7 +83,7 @@ public class TestNLJoinExec {
 
     TableMeta employeeMeta = CatalogUtil.newTableMeta("CSV");
     Path employeePath = new Path(testDir, "employee.csv");
-    Appender appender = ((FileStorageManager)StorageManager.getFileStorageManager(conf))
+    Appender appender = ((FileStorageManager) TableSpaceManager.getFileStorageManager(conf))
         .getAppender(employeeMeta, schema, employeePath);
     appender.init();
     Tuple tuple = new VTuple(schema.size());
@@ -108,7 +107,7 @@ public class TestNLJoinExec {
     peopleSchema.addColumn("age", Type.INT4);
     TableMeta peopleMeta = CatalogUtil.newTableMeta("CSV");
     Path peoplePath = new Path(testDir, "people.csv");
-    appender = ((FileStorageManager)StorageManager.getFileStorageManager(conf))
+    appender = ((FileStorageManager) TableSpaceManager.getFileStorageManager(conf))
         .getAppender(peopleMeta, peopleSchema, peoplePath);
     appender.init();
     tuple = new VTuple(peopleSchema.size());

http://git-wip-us.apache.org/repos/asf/tajo/blob/5491f0e7/tajo-core/src/test/java/org/apache/tajo/engine/planner/physical/TestPhysicalPlanner.java
----------------------------------------------------------------------
diff --git a/tajo-core/src/test/java/org/apache/tajo/engine/planner/physical/TestPhysicalPlanner.java b/tajo-core/src/test/java/org/apache/tajo/engine/planner/physical/TestPhysicalPlanner.java
index 3e0b231..8c72d39 100644
--- a/tajo-core/src/test/java/org/apache/tajo/engine/planner/physical/TestPhysicalPlanner.java
+++ b/tajo-core/src/test/java/org/apache/tajo/engine/planner/physical/TestPhysicalPlanner.java
@@ -26,7 +26,6 @@ import org.apache.hadoop.fs.Path;
 import org.apache.tajo.*;
 import org.apache.tajo.algebra.Expr;
 import org.apache.tajo.catalog.*;
-import org.apache.tajo.catalog.proto.CatalogProtos.StoreType;
 import org.apache.tajo.catalog.statistics.TableStats;
 import org.apache.tajo.common.TajoDataTypes.Type;
 import org.apache.tajo.conf.TajoConf;
@@ -99,7 +98,7 @@ public class TestPhysicalPlanner {
     util.startCatalogCluster();
     conf = util.getConfiguration();
     testDir = CommonTestingUtil.getTestDir(TajoTestingCluster.DEFAULT_TEST_DIRECTORY + "/TestPhysicalPlanner");
-    sm = (FileStorageManager)StorageManager.getFileStorageManager(conf);
+    sm = (FileStorageManager) TableSpaceManager.getFileStorageManager(conf);
     catalog = util.getMiniCatalogCluster().getCatalog();
     catalog.createTablespace(DEFAULT_TABLESPACE_NAME, testDir.toUri().toString());
     catalog.createDatabase(DEFAULT_DATABASE_NAME, DEFAULT_TABLESPACE_NAME);
@@ -181,7 +180,7 @@ public class TestPhysicalPlanner {
 
     Schema scoreSchmea = score.getSchema();
     TableMeta scoreLargeMeta = CatalogUtil.newTableMeta("RAW", new KeyValueSet());
-    Appender appender =  ((FileStorageManager)StorageManager.getFileStorageManager(conf))
+    Appender appender =  ((FileStorageManager) TableSpaceManager.getFileStorageManager(conf))
         .getAppender(scoreLargeMeta, scoreSchmea, scoreLargePath);
     appender.enableStats();
     appender.init();
@@ -443,7 +442,7 @@ public class TestPhysicalPlanner {
     exec.next();
     exec.close();
 
-    Scanner scanner =  ((FileStorageManager)StorageManager.getFileStorageManager(conf))
+    Scanner scanner =  ((FileStorageManager) TableSpaceManager.getFileStorageManager(conf))
         .getFileScanner(outputMeta, rootNode.getOutSchema(), ctx.getOutputPath());
     scanner.init();
     Tuple tuple;
@@ -503,7 +502,7 @@ public class TestPhysicalPlanner {
     // checking the file contents
     long totalNum = 0;
     for (FileStatus status : fs.listStatus(ctx.getOutputPath().getParent())) {
-      Scanner scanner =  ((FileStorageManager)StorageManager.getFileStorageManager(conf)).getFileScanner(
+      Scanner scanner =  ((FileStorageManager) TableSpaceManager.getFileStorageManager(conf)).getFileScanner(
           CatalogUtil.newTableMeta("CSV"),
           rootNode.getOutSchema(),
           status.getPath());
@@ -540,7 +539,7 @@ public class TestPhysicalPlanner {
     exec.next();
     exec.close();
 
-    Scanner scanner = ((FileStorageManager)StorageManager.getFileStorageManager(conf)).getFileScanner(
+    Scanner scanner = ((FileStorageManager) TableSpaceManager.getFileStorageManager(conf)).getFileScanner(
         outputMeta, rootNode.getOutSchema(), ctx.getOutputPath());
     scanner.init();
     Tuple tuple;

http://git-wip-us.apache.org/repos/asf/tajo/blob/5491f0e7/tajo-core/src/test/java/org/apache/tajo/engine/planner/physical/TestProgressExternalSortExec.java
----------------------------------------------------------------------
diff --git a/tajo-core/src/test/java/org/apache/tajo/engine/planner/physical/TestProgressExternalSortExec.java b/tajo-core/src/test/java/org/apache/tajo/engine/planner/physical/TestProgressExternalSortExec.java
index d56c3b0..94ebe51 100644
--- a/tajo-core/src/test/java/org/apache/tajo/engine/planner/physical/TestProgressExternalSortExec.java
+++ b/tajo-core/src/test/java/org/apache/tajo/engine/planner/physical/TestProgressExternalSortExec.java
@@ -25,7 +25,6 @@ import org.apache.tajo.TajoConstants;
 import org.apache.tajo.TajoTestingCluster;
 import org.apache.tajo.algebra.Expr;
 import org.apache.tajo.catalog.*;
-import org.apache.tajo.catalog.proto.CatalogProtos;
 import org.apache.tajo.catalog.statistics.TableStats;
 import org.apache.tajo.common.TajoDataTypes;
 import org.apache.tajo.conf.TajoConf;
@@ -87,7 +86,7 @@ public class TestProgressExternalSortExec {
 
     TableMeta employeeMeta = CatalogUtil.newTableMeta("RAW");
     Path employeePath = new Path(testDir, "employee.csv");
-    Appender appender = ((FileStorageManager)StorageManager.getFileStorageManager(conf))
+    Appender appender = ((FileStorageManager) TableSpaceManager.getFileStorageManager(conf))
         .getAppender(employeeMeta, schema, employeePath);
     appender.enableStats();
     appender.init();

http://git-wip-us.apache.org/repos/asf/tajo/blob/5491f0e7/tajo-core/src/test/java/org/apache/tajo/engine/planner/physical/TestRightOuterHashJoinExec.java
----------------------------------------------------------------------
diff --git a/tajo-core/src/test/java/org/apache/tajo/engine/planner/physical/TestRightOuterHashJoinExec.java b/tajo-core/src/test/java/org/apache/tajo/engine/planner/physical/TestRightOuterHashJoinExec.java
index 097d75b..3455cb3 100644
--- a/tajo-core/src/test/java/org/apache/tajo/engine/planner/physical/TestRightOuterHashJoinExec.java
+++ b/tajo-core/src/test/java/org/apache/tajo/engine/planner/physical/TestRightOuterHashJoinExec.java
@@ -23,7 +23,6 @@ import org.apache.tajo.LocalTajoTestingUtility;
 import org.apache.tajo.TajoTestingCluster;
 import org.apache.tajo.algebra.Expr;
 import org.apache.tajo.catalog.*;
-import org.apache.tajo.catalog.proto.CatalogProtos.StoreType;
 import org.apache.tajo.common.TajoDataTypes.Type;
 import org.apache.tajo.conf.TajoConf;
 import org.apache.tajo.datum.Datum;
@@ -101,7 +100,7 @@ public class TestRightOuterHashJoinExec {
 
     TableMeta dep3Meta = CatalogUtil.newTableMeta("CSV");
     Path dep3Path = new Path(testDir, "dep3.csv");
-    Appender appender1 = ((FileStorageManager)StorageManager.getFileStorageManager(conf))
+    Appender appender1 = ((FileStorageManager) TableSpaceManager.getFileStorageManager(conf))
         .getAppender(dep3Meta, dep3Schema, dep3Path);
     appender1.init();
     Tuple tuple = new VTuple(dep3Schema.size());
@@ -131,7 +130,7 @@ public class TestRightOuterHashJoinExec {
 
     TableMeta job3Meta = CatalogUtil.newTableMeta("CSV");
     Path job3Path = new Path(testDir, "job3.csv");
-    Appender appender2 = ((FileStorageManager)StorageManager.getFileStorageManager(conf))
+    Appender appender2 = ((FileStorageManager) TableSpaceManager.getFileStorageManager(conf))
         .getAppender(job3Meta, job3Schema, job3Path);
     appender2.init();
     Tuple tuple2 = new VTuple(job3Schema.size());
@@ -171,7 +170,7 @@ public class TestRightOuterHashJoinExec {
 
     TableMeta emp3Meta = CatalogUtil.newTableMeta("CSV");
     Path emp3Path = new Path(testDir, "emp3.csv");
-    Appender appender3 = ((FileStorageManager)StorageManager.getFileStorageManager(conf))
+    Appender appender3 = ((FileStorageManager) TableSpaceManager.getFileStorageManager(conf))
         .getAppender(emp3Meta, emp3Schema, emp3Path);
     appender3.init();
     Tuple tuple3 = new VTuple(emp3Schema.size());

http://git-wip-us.apache.org/repos/asf/tajo/blob/5491f0e7/tajo-core/src/test/java/org/apache/tajo/engine/planner/physical/TestRightOuterMergeJoinExec.java
----------------------------------------------------------------------
diff --git a/tajo-core/src/test/java/org/apache/tajo/engine/planner/physical/TestRightOuterMergeJoinExec.java b/tajo-core/src/test/java/org/apache/tajo/engine/planner/physical/TestRightOuterMergeJoinExec.java
index 181f70e..c6bf2ef 100644
--- a/tajo-core/src/test/java/org/apache/tajo/engine/planner/physical/TestRightOuterMergeJoinExec.java
+++ b/tajo-core/src/test/java/org/apache/tajo/engine/planner/physical/TestRightOuterMergeJoinExec.java
@@ -23,7 +23,6 @@ import org.apache.tajo.LocalTajoTestingUtility;
 import org.apache.tajo.TajoTestingCluster;
 import org.apache.tajo.algebra.Expr;
 import org.apache.tajo.catalog.*;
-import org.apache.tajo.catalog.proto.CatalogProtos.StoreType;
 import org.apache.tajo.common.TajoDataTypes.Type;
 import org.apache.tajo.conf.TajoConf;
 import org.apache.tajo.datum.Datum;
@@ -108,7 +107,7 @@ public class TestRightOuterMergeJoinExec {
 
     TableMeta dep3Meta = CatalogUtil.newTableMeta("CSV");
     Path dep3Path = new Path(testDir, "dep3.csv");
-    Appender appender1 = ((FileStorageManager)StorageManager.getFileStorageManager(conf))
+    Appender appender1 = ((FileStorageManager) TableSpaceManager.getFileStorageManager(conf))
         .getAppender(dep3Meta, dep3Schema, dep3Path);
     appender1.init();
     Tuple tuple = new VTuple(dep3Schema.size());
@@ -147,7 +146,7 @@ public class TestRightOuterMergeJoinExec {
 
     TableMeta dep4Meta = CatalogUtil.newTableMeta("CSV");
     Path dep4Path = new Path(testDir, "dep4.csv");
-    Appender appender4 = ((FileStorageManager)StorageManager.getFileStorageManager(conf))
+    Appender appender4 = ((FileStorageManager) TableSpaceManager.getFileStorageManager(conf))
         .getAppender(dep4Meta, dep4Schema, dep4Path);
     appender4.init();
     Tuple tuple4 = new VTuple(dep4Schema.size());
@@ -179,7 +178,7 @@ public class TestRightOuterMergeJoinExec {
 
     TableMeta job3Meta = CatalogUtil.newTableMeta("CSV");
     Path job3Path = new Path(testDir, "job3.csv");
-    Appender appender2 = ((FileStorageManager)StorageManager.getFileStorageManager(conf))
+    Appender appender2 = ((FileStorageManager) TableSpaceManager.getFileStorageManager(conf))
         .getAppender(job3Meta, job3Schema, job3Path);
     appender2.init();
     Tuple tuple2 = new VTuple(job3Schema.size());
@@ -219,7 +218,7 @@ public class TestRightOuterMergeJoinExec {
 
     TableMeta emp3Meta = CatalogUtil.newTableMeta("CSV");
     Path emp3Path = new Path(testDir, "emp3.csv");
-    Appender appender3 = ((FileStorageManager)StorageManager.getFileStorageManager(conf))
+    Appender appender3 = ((FileStorageManager) TableSpaceManager.getFileStorageManager(conf))
         .getAppender(emp3Meta, emp3Schema, emp3Path);
     appender3.init();
     Tuple tuple3 = new VTuple(emp3Schema.size());
@@ -272,7 +271,7 @@ public class TestRightOuterMergeJoinExec {
 
     TableMeta phone3Meta = CatalogUtil.newTableMeta("CSV");
     Path phone3Path = new Path(testDir, "phone3.csv");
-    Appender appender5 = ((FileStorageManager)StorageManager.getFileStorageManager(conf))
+    Appender appender5 = ((FileStorageManager) TableSpaceManager.getFileStorageManager(conf))
         .getAppender(phone3Meta, phone3Schema, phone3Path);
     appender5.init();
 

http://git-wip-us.apache.org/repos/asf/tajo/blob/5491f0e7/tajo-core/src/test/java/org/apache/tajo/engine/planner/physical/TestSortExec.java
----------------------------------------------------------------------
diff --git a/tajo-core/src/test/java/org/apache/tajo/engine/planner/physical/TestSortExec.java b/tajo-core/src/test/java/org/apache/tajo/engine/planner/physical/TestSortExec.java
index 32746f8..a350831 100644
--- a/tajo-core/src/test/java/org/apache/tajo/engine/planner/physical/TestSortExec.java
+++ b/tajo-core/src/test/java/org/apache/tajo/engine/planner/physical/TestSortExec.java
@@ -25,7 +25,6 @@ import org.apache.tajo.TajoTestingCluster;
 import org.apache.tajo.TpchTestBase;
 import org.apache.tajo.algebra.Expr;
 import org.apache.tajo.catalog.*;
-import org.apache.tajo.catalog.proto.CatalogProtos.StoreType;
 import org.apache.tajo.common.TajoDataTypes.Type;
 import org.apache.tajo.conf.TajoConf;
 import org.apache.tajo.datum.Datum;
@@ -70,7 +69,7 @@ public class TestSortExec {
     util = TpchTestBase.getInstance().getTestingCluster();
     catalog = util.getMaster().getCatalog();
     workDir = CommonTestingUtil.getTestDir(TEST_PATH);
-    sm = (FileStorageManager)StorageManager.getFileStorageManager(conf);
+    sm = (FileStorageManager) TableSpaceManager.getFileStorageManager(conf);
 
     Schema schema = new Schema();
     schema.addColumn("managerid", Type.INT4);
@@ -82,7 +81,7 @@ public class TestSortExec {
     tablePath = StorageUtil.concatPath(workDir, "employee", "table1");
     sm.getFileSystem().mkdirs(tablePath.getParent());
 
-    Appender appender = ((FileStorageManager)StorageManager.getFileStorageManager(conf))
+    Appender appender = ((FileStorageManager) TableSpaceManager.getFileStorageManager(conf))
         .getAppender(employeeMeta, schema, tablePath);
     appender.init();
     Tuple tuple = new VTuple(schema.size());

http://git-wip-us.apache.org/repos/asf/tajo/blob/5491f0e7/tajo-core/src/test/java/org/apache/tajo/engine/query/TestHBaseTable.java
----------------------------------------------------------------------
diff --git a/tajo-core/src/test/java/org/apache/tajo/engine/query/TestHBaseTable.java b/tajo-core/src/test/java/org/apache/tajo/engine/query/TestHBaseTable.java
index 2bc16c6..d2faf7e 100644
--- a/tajo-core/src/test/java/org/apache/tajo/engine/query/TestHBaseTable.java
+++ b/tajo-core/src/test/java/org/apache/tajo/engine/query/TestHBaseTable.java
@@ -32,13 +32,13 @@ import org.apache.tajo.QueryTestCaseBase;
 import org.apache.tajo.TajoTestingCluster;
 import org.apache.tajo.catalog.Schema;
 import org.apache.tajo.catalog.TableDesc;
-import org.apache.tajo.catalog.proto.CatalogProtos.StoreType;
 import org.apache.tajo.common.TajoDataTypes.Type;
 import org.apache.tajo.datum.TextDatum;
 import org.apache.tajo.plan.expr.*;
 import org.apache.tajo.plan.logical.ScanNode;
 import org.apache.tajo.storage.StorageConstants;
 import org.apache.tajo.storage.StorageManager;
+import org.apache.tajo.storage.TableSpaceManager;
 import org.apache.tajo.storage.fragment.Fragment;
 import org.apache.tajo.storage.hbase.*;
 import org.apache.tajo.util.Bytes;
@@ -205,7 +205,7 @@ public class TestHBaseTable extends QueryTestCaseBase {
 
     assertTableExists("external_hbase_mapped_table");
 
-    HConnection hconn = ((HBaseStorageManager)StorageManager.getStorageManager(conf, "HBASE"))
+    HConnection hconn = ((HBaseStorageManager) TableSpaceManager.getStorageManager(conf, "HBASE"))
         .getConnection(testingCluster.getHBaseUtil().getConf());
     HTableInterface htable = hconn.getTable("external_hbase_table");
 
@@ -244,7 +244,7 @@ public class TestHBaseTable extends QueryTestCaseBase {
 
     assertTableExists("external_hbase_mapped_table");
 
-    HConnection hconn = ((HBaseStorageManager)StorageManager.getStorageManager(conf, "HBASE"))
+    HConnection hconn = ((HBaseStorageManager) TableSpaceManager.getStorageManager(conf, "HBASE"))
         .getConnection(testingCluster.getHBaseUtil().getConf());
     HTableInterface htable = hconn.getTable("external_hbase_table");
 
@@ -297,7 +297,7 @@ public class TestHBaseTable extends QueryTestCaseBase {
 
     assertTableExists("external_hbase_mapped_table");
 
-    HConnection hconn = ((HBaseStorageManager)StorageManager.getStorageManager(conf, "HBASE"))
+    HConnection hconn = ((HBaseStorageManager) TableSpaceManager.getStorageManager(conf, "HBASE"))
         .getConnection(testingCluster.getHBaseUtil().getConf());
     HTableInterface htable = hconn.getTable("external_hbase_table");
 
@@ -334,7 +334,7 @@ public class TestHBaseTable extends QueryTestCaseBase {
 
     assertTableExists("external_hbase_mapped_table");
 
-    HConnection hconn = ((HBaseStorageManager)StorageManager.getStorageManager(conf, "HBASE"))
+    HConnection hconn = ((HBaseStorageManager) TableSpaceManager.getStorageManager(conf, "HBASE"))
         .getConnection(testingCluster.getHBaseUtil().getConf());
     HTableInterface htable = hconn.getTable("external_hbase_table");
 
@@ -469,7 +469,7 @@ public class TestHBaseTable extends QueryTestCaseBase {
     EvalNode evalNodeEq = new BinaryEval(EvalType.EQUAL, new FieldEval(tableDesc.getLogicalSchema().getColumn("rk")),
         new ConstEval(new TextDatum("021")));
     scanNode.setQual(evalNodeEq);
-    StorageManager storageManager = StorageManager.getStorageManager(conf, "HBASE");
+    StorageManager storageManager = TableSpaceManager.getStorageManager(conf, "HBASE");
     List<Fragment> fragments = storageManager.getSplits("hbase_mapped_table", tableDesc, scanNode);
     assertEquals(1, fragments.size());
     assertEquals("021", new String(((HBaseFragment)fragments.get(0)).getStartRow()));

http://git-wip-us.apache.org/repos/asf/tajo/blob/5491f0e7/tajo-core/src/test/java/org/apache/tajo/engine/query/TestJoinBroadcast.java
----------------------------------------------------------------------
diff --git a/tajo-core/src/test/java/org/apache/tajo/engine/query/TestJoinBroadcast.java b/tajo-core/src/test/java/org/apache/tajo/engine/query/TestJoinBroadcast.java
index a1eceea..8387abd 100644
--- a/tajo-core/src/test/java/org/apache/tajo/engine/query/TestJoinBroadcast.java
+++ b/tajo-core/src/test/java/org/apache/tajo/engine/query/TestJoinBroadcast.java
@@ -569,7 +569,7 @@ public class TestJoinBroadcast extends QueryTestCaseBase {
         }
         Path dataPath = new Path(table.getPath().toString(), fileIndex + ".csv");
         fileIndex++;
-        appender = ((FileStorageManager)StorageManager.getFileStorageManager(conf))
+        appender = ((FileStorageManager) TableSpaceManager.getFileStorageManager(conf))
             .getAppender(tableMeta, schema, dataPath);
         appender.init();
       }

http://git-wip-us.apache.org/repos/asf/tajo/blob/5491f0e7/tajo-core/src/test/java/org/apache/tajo/jdbc/TestResultSet.java
----------------------------------------------------------------------
diff --git a/tajo-core/src/test/java/org/apache/tajo/jdbc/TestResultSet.java b/tajo-core/src/test/java/org/apache/tajo/jdbc/TestResultSet.java
index 1e4543c..70d07c3 100644
--- a/tajo-core/src/test/java/org/apache/tajo/jdbc/TestResultSet.java
+++ b/tajo-core/src/test/java/org/apache/tajo/jdbc/TestResultSet.java
@@ -66,7 +66,7 @@ public class TestResultSet {
   public static void setup() throws Exception {
     util = TpchTestBase.getInstance().getTestingCluster();
     conf = util.getConfiguration();
-    sm = (FileStorageManager)StorageManager.getFileStorageManager(conf);
+    sm = (FileStorageManager) TableSpaceManager.getFileStorageManager(conf);
 
     scoreSchema = new Schema();
     scoreSchema.addColumn("deptname", Type.TEXT);

http://git-wip-us.apache.org/repos/asf/tajo/blob/5491f0e7/tajo-core/src/test/java/org/apache/tajo/master/TestExecutionBlockCursor.java
----------------------------------------------------------------------
diff --git a/tajo-core/src/test/java/org/apache/tajo/master/TestExecutionBlockCursor.java b/tajo-core/src/test/java/org/apache/tajo/master/TestExecutionBlockCursor.java
index 3d23d16..5efdede 100644
--- a/tajo-core/src/test/java/org/apache/tajo/master/TestExecutionBlockCursor.java
+++ b/tajo-core/src/test/java/org/apache/tajo/master/TestExecutionBlockCursor.java
@@ -23,7 +23,6 @@ import org.apache.tajo.catalog.CatalogService;
 import org.apache.tajo.catalog.CatalogUtil;
 import org.apache.tajo.catalog.TableDesc;
 import org.apache.tajo.catalog.TableMeta;
-import org.apache.tajo.catalog.proto.CatalogProtos;
 import org.apache.tajo.catalog.statistics.TableStats;
 import org.apache.tajo.conf.TajoConf;
 import org.apache.tajo.engine.parser.SQLAnalyzer;
@@ -35,6 +34,7 @@ import org.apache.tajo.engine.planner.global.GlobalPlanner;
 import org.apache.tajo.engine.planner.global.MasterPlan;
 import org.apache.tajo.engine.query.QueryContext;
 import org.apache.tajo.storage.StorageManager;
+import org.apache.tajo.storage.TableSpaceManager;
 import org.apache.tajo.util.CommonTestingUtil;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
@@ -82,7 +82,7 @@ public class TestExecutionBlockCursor {
     logicalPlanner = new LogicalPlanner(catalog);
     optimizer = new LogicalOptimizer(conf);
 
-    StorageManager sm  = StorageManager.getFileStorageManager(conf);
+    StorageManager sm  = TableSpaceManager.getFileStorageManager(conf);
     dispatcher = new AsyncDispatcher();
     dispatcher.init(conf);
     dispatcher.start();

http://git-wip-us.apache.org/repos/asf/tajo/blob/5491f0e7/tajo-core/src/test/java/org/apache/tajo/storage/TestRowFile.java
----------------------------------------------------------------------
diff --git a/tajo-core/src/test/java/org/apache/tajo/storage/TestRowFile.java b/tajo-core/src/test/java/org/apache/tajo/storage/TestRowFile.java
index 362cb24..52b59ea 100644
--- a/tajo-core/src/test/java/org/apache/tajo/storage/TestRowFile.java
+++ b/tajo-core/src/test/java/org/apache/tajo/storage/TestRowFile.java
@@ -27,7 +27,6 @@ import org.apache.tajo.TpchTestBase;
 import org.apache.tajo.catalog.CatalogUtil;
 import org.apache.tajo.catalog.Schema;
 import org.apache.tajo.catalog.TableMeta;
-import org.apache.tajo.catalog.proto.CatalogProtos.StoreType;
 import org.apache.tajo.catalog.proto.CatalogProtos.TableProto;
 import org.apache.tajo.catalog.statistics.TableStats;
 import org.apache.tajo.common.TajoDataTypes.Type;
@@ -69,7 +68,7 @@ public class TestRowFile {
     TableMeta meta = CatalogUtil.newTableMeta("ROWFILE");
 
     FileStorageManager sm =
-        (FileStorageManager)StorageManager.getFileStorageManager(conf);
+        (FileStorageManager) TableSpaceManager.getFileStorageManager(conf);
 
     Path tablePath = new Path("/test");
     Path metaPath = new Path(tablePath, ".meta");
@@ -110,7 +109,7 @@ public class TestRowFile {
 
     int tupleCnt = 0;
     start = System.currentTimeMillis();
-    Scanner scanner = StorageManager.getFileStorageManager(conf).getScanner(meta, schema, fragment);
+    Scanner scanner = TableSpaceManager.getFileStorageManager(conf).getScanner(meta, schema, fragment);
     scanner.init();
     while ((tuple=scanner.next()) != null) {
       tupleCnt++;

http://git-wip-us.apache.org/repos/asf/tajo/blob/5491f0e7/tajo-storage/tajo-storage-common/src/main/java/org/apache/tajo/storage/MergeScanner.java
----------------------------------------------------------------------
diff --git a/tajo-storage/tajo-storage-common/src/main/java/org/apache/tajo/storage/MergeScanner.java b/tajo-storage/tajo-storage-common/src/main/java/org/apache/tajo/storage/MergeScanner.java
index 5423fd7..d007aea 100644
--- a/tajo-storage/tajo-storage-common/src/main/java/org/apache/tajo/storage/MergeScanner.java
+++ b/tajo-storage/tajo-storage-common/src/main/java/org/apache/tajo/storage/MergeScanner.java
@@ -131,7 +131,7 @@ public class MergeScanner implements Scanner {
   private Scanner getNextScanner() throws IOException {
     if (iterator.hasNext()) {
       currentFragment = iterator.next();
-      currentScanner = StorageManager.getStorageManager((TajoConf)conf, meta.getStoreType()).getScanner(meta, schema,
+      currentScanner = TableSpaceManager.getStorageManager((TajoConf) conf, meta.getStoreType()).getScanner(meta, schema,
           currentFragment, target);
       currentScanner.init();
       return currentScanner;