Posted to commits@tajo.apache.org by ji...@apache.org on 2014/12/05 03:22:25 UTC

[1/4] tajo git commit: TAJO-1191: Change DateDatum timezone to UTC. (Jaewoong Jung via hyunsik)

Repository: tajo
Updated Branches:
  refs/heads/index_support 5c11283a6 -> 72600c138


TAJO-1191: Change DateDatum timezone to UTC. (Jaewoong Jung via hyunsik)

Closes #278
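
A minimal sketch of the behavioral change, mirroring the updated assertions in the TestIntervalDatum hunk below: DateDatum values are now treated as UTC, so date +/- interval arithmetic produces a UTC timestamp and its default rendering no longer goes through the session timezone. (Test-style fragment only; it assumes the JUnit and Tajo test helpers already shown in the diff.)

    // date '2001-09-28' + interval '1 hour'  ==>  timestamp '2001-09-28 01:00:00' (UTC)
    Datum date = DatumFactory.createDate(2001, 9, 28);
    Datum result = date.plus(new IntervalDatum(60 * 60 * 1000));
    assertEquals(TajoDataTypes.Type.TIMESTAMP, result.type());
    // No TajoConf.getCurrentTimeZone() conversion is needed any more; asChars() renders UTC.
    assertEquals("2001-09-28 01:00:00", result.asChars());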


Project: http://git-wip-us.apache.org/repos/asf/tajo/repo
Commit: http://git-wip-us.apache.org/repos/asf/tajo/commit/97507e45
Tree: http://git-wip-us.apache.org/repos/asf/tajo/tree/97507e45
Diff: http://git-wip-us.apache.org/repos/asf/tajo/diff/97507e45

Branch: refs/heads/index_support
Commit: 97507e45883c9db2fec2f5a9a9e544384a86ccd0
Parents: 9f8be1a
Author: Hyunsik Choi <hy...@apache.org>
Authored: Thu Dec 4 16:12:25 2014 +0900
Committer: Hyunsik Choi <hy...@apache.org>
Committed: Thu Dec 4 16:12:25 2014 +0900

----------------------------------------------------------------------
 CHANGES                                                  |  2 ++
 .../src/main/java/org/apache/tajo/datum/DateDatum.java   | 11 +----------
 .../main/java/org/apache/tajo/datum/IntervalDatum.java   |  1 -
 .../src/main/java/org/apache/tajo/datum/TimeDatum.java   |  4 ----
 .../java/org/apache/tajo/datum/TestIntervalDatum.java    |  8 ++++----
 5 files changed, 7 insertions(+), 19 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/tajo/blob/97507e45/CHANGES
----------------------------------------------------------------------
diff --git a/CHANGES b/CHANGES
index 60aa3e0..a697afe 100644
--- a/CHANGES
+++ b/CHANGES
@@ -83,6 +83,8 @@ Release 0.9.1 - unreleased
 
   BUG FIXES
 
+    TAJO-1191: Change DateDatum timezone to UTC. (Jaewoong Jung via hyunsik)
+
     TAJO-1224: When there is no projected column, json scan can be hang. 
     (hyunsik) 
 

http://git-wip-us.apache.org/repos/asf/tajo/blob/97507e45/tajo-common/src/main/java/org/apache/tajo/datum/DateDatum.java
----------------------------------------------------------------------
diff --git a/tajo-common/src/main/java/org/apache/tajo/datum/DateDatum.java b/tajo-common/src/main/java/org/apache/tajo/datum/DateDatum.java
index 3296d4f..188d226 100644
--- a/tajo-common/src/main/java/org/apache/tajo/datum/DateDatum.java
+++ b/tajo-common/src/main/java/org/apache/tajo/datum/DateDatum.java
@@ -31,6 +31,7 @@ import org.apache.tajo.util.datetime.TimeMeta;
 public class DateDatum extends Datum {
   public static final int SIZE = 4;
 
+  // Dates are stored in UTC.
   private int year;
   private int monthOfYear;
   private int dayOfMonth;
@@ -119,17 +120,12 @@ public class DateDatum extends Datum {
         if (interval.getMonths() > 0) {
           tm.plusMonths(interval.getMonths());
         }
-        DateTimeUtil.toUTCTimezone(tm);
         return new TimestampDatum(DateTimeUtil.toJulianTimestamp(tm));
       }
       case TIME: {
         TimeMeta tm1 = toTimeMeta();
-
         TimeMeta tm2 = ((TimeDatum)datum).toTimeMeta();
-        DateTimeUtil.toUserTimezone(tm2);     //TimeDatum is UTC
-
         tm1.plusTime(DateTimeUtil.toTime(tm2));
-        DateTimeUtil.toUTCTimezone(tm1);
         return new TimestampDatum(DateTimeUtil.toJulianTimestamp(tm1));
       }
       default:
@@ -155,17 +151,12 @@ public class DateDatum extends Datum {
           tm.plusMonths(0 - interval.getMonths());
         }
         tm.plusMillis(0 - interval.getMilliSeconds());
-        DateTimeUtil.toUTCTimezone(tm);
         return new TimestampDatum(DateTimeUtil.toJulianTimestamp(tm));
       }
       case TIME: {
         TimeMeta tm1 = toTimeMeta();
-
         TimeMeta tm2 = ((TimeDatum)datum).toTimeMeta();
-        DateTimeUtil.toUserTimezone(tm2);     //TimeDatum is UTC
-
         tm1.plusTime(0 - DateTimeUtil.toTime(tm2));
-        DateTimeUtil.toUTCTimezone(tm1);
         return new TimestampDatum(DateTimeUtil.toJulianTimestamp(tm1));
       }
       case DATE: {

http://git-wip-us.apache.org/repos/asf/tajo/blob/97507e45/tajo-common/src/main/java/org/apache/tajo/datum/IntervalDatum.java
----------------------------------------------------------------------
diff --git a/tajo-common/src/main/java/org/apache/tajo/datum/IntervalDatum.java b/tajo-common/src/main/java/org/apache/tajo/datum/IntervalDatum.java
index c6f3922..6207891 100644
--- a/tajo-common/src/main/java/org/apache/tajo/datum/IntervalDatum.java
+++ b/tajo-common/src/main/java/org/apache/tajo/datum/IntervalDatum.java
@@ -236,7 +236,6 @@ public class IntervalDatum extends Datum {
         if (getMonths() > 0) {
           tm.plusMonths(getMonths());
         }
-        DateTimeUtil.toUTCTimezone(tm);
         return new TimestampDatum(DateTimeUtil.toJulianTimestamp(tm));
       }
       case TIME: {

http://git-wip-us.apache.org/repos/asf/tajo/blob/97507e45/tajo-common/src/main/java/org/apache/tajo/datum/TimeDatum.java
----------------------------------------------------------------------
diff --git a/tajo-common/src/main/java/org/apache/tajo/datum/TimeDatum.java b/tajo-common/src/main/java/org/apache/tajo/datum/TimeDatum.java
index 37e5e78..6cac586 100644
--- a/tajo-common/src/main/java/org/apache/tajo/datum/TimeDatum.java
+++ b/tajo-common/src/main/java/org/apache/tajo/datum/TimeDatum.java
@@ -128,13 +128,9 @@ public class TimeDatum extends Datum {
       }
       case DATE: {
         TimeMeta tm = toTimeMeta();
-        DateTimeUtil.toUserTimezone(tm);     //TimeDatum is UTC
-
         DateDatum dateDatum = (DateDatum) datum;
         TimeMeta dateTm = dateDatum.toTimeMeta();
         dateTm.plusTime(DateTimeUtil.toTime(tm));
-
-        DateTimeUtil.toUTCTimezone(dateTm);
         return new TimestampDatum(DateTimeUtil.toJulianTimestamp(dateTm));
       }
       default:

http://git-wip-us.apache.org/repos/asf/tajo/blob/97507e45/tajo-common/src/test/java/org/apache/tajo/datum/TestIntervalDatum.java
----------------------------------------------------------------------
diff --git a/tajo-common/src/test/java/org/apache/tajo/datum/TestIntervalDatum.java b/tajo-common/src/test/java/org/apache/tajo/datum/TestIntervalDatum.java
index 511b356..2646ee7 100644
--- a/tajo-common/src/test/java/org/apache/tajo/datum/TestIntervalDatum.java
+++ b/tajo-common/src/test/java/org/apache/tajo/datum/TestIntervalDatum.java
@@ -79,13 +79,13 @@ public class TestIntervalDatum {
     datum = DatumFactory.createDate(2001, 9, 28);
     Datum result = datum.plus(new IntervalDatum(60 * 60 * 1000));
     assertEquals(TajoDataTypes.Type.TIMESTAMP, result.type());
-    assertEquals("2001-09-28 01:00:00", ((TimestampDatum)result).asChars(TajoConf.getCurrentTimeZone(), false));
+    assertEquals("2001-09-28 01:00:00", result.asChars());
 
     // interval '1 hour' +  date '2001-09-28'	==> timestamp '2001-09-28 01:00:00'
     datum = new IntervalDatum(60 * 60 * 1000);
     result = datum.plus(DatumFactory.createDate(2001, 9, 28));
     assertEquals(TajoDataTypes.Type.TIMESTAMP, result.type());
-    assertEquals("2001-09-28 01:00:00", ((TimestampDatum)result).asChars(TajoConf.getCurrentTimeZone(), false));
+    assertEquals("2001-09-28 01:00:00", result.asChars());
 
     // date '2001-09-28' + time '03:00' ==> timestamp '2001-09-28 03:00:00'
     datum = DatumFactory.createDate(2001, 9, 28);
@@ -133,14 +133,14 @@ public class TestIntervalDatum {
     datum = DatumFactory.createDate(2001, 9, 28);
     result = datum.minus(new IntervalDatum(1 * 60 * 60 * 1000));
     assertEquals(TajoDataTypes.Type.TIMESTAMP, result.type());
-    assertEquals("2001-09-27 23:00:00", ((TimestampDatum)result).asChars(TajoConf.getCurrentTimeZone(), false));
+    assertEquals("2001-09-27 23:00:00", result.asChars());
 
     // date '2001-09-28' - interval '1 day 1 hour' ==> timestamp '2001-09-26 23:00:00'
     // In this case all datums are UTC
     datum = DatumFactory.createDate(2001, 9, 28);
     result = datum.minus(new IntervalDatum(IntervalDatum.DAY_MILLIS + 1 * 60 * 60 * 1000));
     assertEquals(TajoDataTypes.Type.TIMESTAMP, result.type());
-    assertEquals("2001-09-26 23:00:00",  ((TimestampDatum)result).asChars(TajoConf.getCurrentTimeZone(), false));
+    assertEquals("2001-09-26 23:00:00",  result.asChars());
 
     // time '05:00' - time '03:00' ==>	interval '02:00:00'
     datum = new TimeDatum(DateTimeUtil.toTime(5, 0, 0, 0));


[2/4] tajo git commit: TAJO-1194: 'INSERT OVERWRITE .. SELECT' does not remove existing data when result is empty. (jaehwa)

Posted by ji...@apache.org.
TAJO-1194: 'INSERT OVERWRITE .. SELECT' does not remove existing data when result is empty. (jaehwa)

Closes #254
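
The core of the fix (see the Query.java hunk below) is to check whether the staged result actually produced any files before taking the partition-preserving rename path; when the result is empty, the query falls through to the plain overwrite path, so the existing data is removed. The following is a standalone, hypothetical illustration of that check; the class and method names are invented for the example, while getContentSummary/getFileCount are the Hadoop FileSystem APIs used by the patch.

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.ContentSummary;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class EmptyOverwriteCheck {
      // Keep existing partition directories only when the table is partitioned AND the
      // staging directory holds at least one result file; otherwise the old data is dropped.
      static boolean keepExistingPartitions(FileSystem fs, Path stagingResultDir,
                                            boolean hasPartition) throws IOException {
        ContentSummary summary = fs.getContentSummary(stagingResultDir);
        return hasPartition && summary.getFileCount() > 0L;
      }

      public static void main(String[] args) throws IOException {
        FileSystem fs = FileSystem.getLocal(new Configuration());
        System.out.println(keepExistingPartitions(fs, new Path(args[0]), true));
      }
    }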


Project: http://git-wip-us.apache.org/repos/asf/tajo/repo
Commit: http://git-wip-us.apache.org/repos/asf/tajo/commit/f6da07b0
Tree: http://git-wip-us.apache.org/repos/asf/tajo/tree/f6da07b0
Diff: http://git-wip-us.apache.org/repos/asf/tajo/diff/f6da07b0

Branch: refs/heads/index_support
Commit: f6da07b03587d2bce5ca4ac53d86eae3a8ba23c2
Parents: 97507e4
Author: JaeHwa Jung <bl...@apache.org>
Authored: Thu Dec 4 18:45:13 2014 +0900
Committer: JaeHwa Jung <bl...@apache.org>
Committed: Thu Dec 4 18:47:11 2014 +0900

----------------------------------------------------------------------
 CHANGES                                         |   3 +
 .../apache/tajo/master/querymaster/Query.java   |   3 +-
 .../tajo/engine/query/TestTablePartitions.java  | 243 ++++++++-----------
 3 files changed, 106 insertions(+), 143 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/tajo/blob/f6da07b0/CHANGES
----------------------------------------------------------------------
diff --git a/CHANGES b/CHANGES
index a697afe..0c785ce 100644
--- a/CHANGES
+++ b/CHANGES
@@ -83,6 +83,9 @@ Release 0.9.1 - unreleased
 
   BUG FIXES
 
+    TAJO-1194: 'INSERT OVERWRITE .. SELECT' does not remove existing data when result is empty.
+    (jaehwa)
+
     TAJO-1191: Change DateDatum timezone to UTC. (Jaewoong Jung via hyunsik)
 
     TAJO-1224: When there is no projected column, json scan can be hang. 

http://git-wip-us.apache.org/repos/asf/tajo/blob/f6da07b0/tajo-core/src/main/java/org/apache/tajo/master/querymaster/Query.java
----------------------------------------------------------------------
diff --git a/tajo-core/src/main/java/org/apache/tajo/master/querymaster/Query.java b/tajo-core/src/main/java/org/apache/tajo/master/querymaster/Query.java
index 07b47c1..6f80171 100644
--- a/tajo-core/src/main/java/org/apache/tajo/master/querymaster/Query.java
+++ b/tajo-core/src/main/java/org/apache/tajo/master/querymaster/Query.java
@@ -465,8 +465,9 @@ public class Query implements EventHandler<QueryEvent> {
             boolean movedToOldTable = false;
             boolean committed = false;
             Path oldTableDir = new Path(queryContext.getStagingDir(), TajoConstants.INSERT_OVERWIRTE_OLD_TABLE_NAME);
+            ContentSummary summary = fs.getContentSummary(stagingResultDir);
 
-            if (queryContext.hasPartition()) {
+            if (queryContext.hasPartition() && summary.getFileCount() > 0L) {
               // This is a map for existing non-leaf directory to rename. A key is current directory and a value is
               // renaming directory.
               Map<Path, Path> renameDirs = TUtil.newHashMap();

http://git-wip-us.apache.org/repos/asf/tajo/blob/f6da07b0/tajo-core/src/test/java/org/apache/tajo/engine/query/TestTablePartitions.java
----------------------------------------------------------------------
diff --git a/tajo-core/src/test/java/org/apache/tajo/engine/query/TestTablePartitions.java b/tajo-core/src/test/java/org/apache/tajo/engine/query/TestTablePartitions.java
index cff5bfb..15cbde0 100644
--- a/tajo-core/src/test/java/org/apache/tajo/engine/query/TestTablePartitions.java
+++ b/tajo-core/src/test/java/org/apache/tajo/engine/query/TestTablePartitions.java
@@ -19,7 +19,7 @@
 package org.apache.tajo.engine.query;
 
 import com.google.common.collect.Maps;
-import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -44,7 +44,6 @@ import org.apache.tajo.jdbc.TajoResultSet;
 import org.apache.tajo.master.querymaster.QueryMasterTask;
 import org.apache.tajo.plan.logical.NodeType;
 import org.apache.tajo.storage.StorageConstants;
-import org.apache.tajo.util.CommonTestingUtil;
 import org.apache.tajo.util.KeyValueSet;
 import org.apache.tajo.worker.TajoWorker;
 import org.junit.Test;
@@ -71,7 +70,7 @@ public class TestTablePartitions extends QueryTestCaseBase {
   public final void testCreateColumnPartitionedTable() throws Exception {
     String tableName = CatalogUtil.normalizeIdentifier("testCreateColumnPartitionedTable");
     ResultSet res = executeString(
-        "create table " + tableName + " (col1 int4, col2 int4) partition by column(key float8) ");
+      "create table " + tableName + " (col1 int4, col2 int4) partition by column(key float8) ");
     res.close();
 
     assertTrue(catalog.existsTable(DEFAULT_DATABASE_NAME, tableName));
@@ -79,8 +78,8 @@ public class TestTablePartitions extends QueryTestCaseBase {
     assertEquals(3, catalog.getTableDesc(DEFAULT_DATABASE_NAME, tableName).getLogicalSchema().size());
 
     res = testBase.execute(
-        "insert overwrite into " + tableName + " select l_orderkey, l_partkey, " +
-            "l_quantity from lineitem");
+      "insert overwrite into " + tableName + " select l_orderkey, l_partkey, " +
+        "l_quantity from lineitem");
 
     MasterPlan plan = getQueryPlan(res);
     ExecutionBlock rootEB = plan.getRoot();
@@ -114,7 +113,7 @@ public class TestTablePartitions extends QueryTestCaseBase {
   public final void testCreateColumnPartitionedTableWithJoin() throws Exception {
     String tableName = CatalogUtil.normalizeIdentifier("testCreateColumnPartitionedTableWithJoin");
     ResultSet res = executeString(
-        "create table " + tableName + " (col1 int4, col2 int4) partition by column(key float8) ");
+      "create table " + tableName + " (col1 int4, col2 int4) partition by column(key float8) ");
     res.close();
 
     assertTrue(catalog.existsTable(DEFAULT_DATABASE_NAME, tableName));
@@ -122,8 +121,8 @@ public class TestTablePartitions extends QueryTestCaseBase {
     assertEquals(3, catalog.getTableDesc(DEFAULT_DATABASE_NAME, tableName).getLogicalSchema().size());
 
     res = testBase.execute(
-        "insert overwrite into " + tableName + " select l_orderkey, l_partkey, " +
-            "l_quantity from lineitem join orders on l_orderkey = o_orderkey");
+      "insert overwrite into " + tableName + " select l_orderkey, l_partkey, " +
+        "l_quantity from lineitem join orders on l_orderkey = o_orderkey");
 
     MasterPlan plan = getQueryPlan(res);
     ExecutionBlock rootEB = plan.getRoot();
@@ -159,7 +158,7 @@ public class TestTablePartitions extends QueryTestCaseBase {
   public final void testCreateColumnPartitionedTableWithSelectedColumns() throws Exception {
     String tableName = CatalogUtil.normalizeIdentifier("testCreateColumnPartitionedTableWithSelectedColumns");
     ResultSet res = executeString(
-        "create table " + tableName + " (col1 int4, col2 int4, null_col int4) partition by column(key float8) ");
+      "create table " + tableName + " (col1 int4, col2 int4, null_col int4) partition by column(key float8) ");
     res.close();
 
     assertTrue(catalog.existsTable(DEFAULT_DATABASE_NAME, tableName));
@@ -167,7 +166,7 @@ public class TestTablePartitions extends QueryTestCaseBase {
     assertEquals(4, catalog.getTableDesc(DEFAULT_DATABASE_NAME, tableName).getLogicalSchema().size());
 
     res = executeString("insert overwrite into " + tableName + " (col1, col2, key) select l_orderkey, " +
-        "l_partkey, l_quantity from lineitem");
+      "l_partkey, l_quantity from lineitem");
     res.close();
   }
 
@@ -175,20 +174,20 @@ public class TestTablePartitions extends QueryTestCaseBase {
   public final void testColumnPartitionedTableByOneColumn() throws Exception {
     String tableName = CatalogUtil.normalizeIdentifier("testColumnPartitionedTableByOneColumn");
     ResultSet res = executeString(
-        "create table " + tableName + " (col1 int4, col2 int4, null_col int4) partition by column(key float8) ");
+      "create table " + tableName + " (col1 int4, col2 int4, null_col int4) partition by column(key float8) ");
     res.close();
 
     assertTrue(catalog.existsTable(DEFAULT_DATABASE_NAME, tableName));
 
     res = executeString("insert overwrite into " + tableName
-        + " (col1, col2, key) select l_orderkey, l_partkey, l_quantity from lineitem");
+      + " (col1, col2, key) select l_orderkey, l_partkey, l_quantity from lineitem");
     res.close();
 
     TableDesc desc = catalog.getTableDesc(DEFAULT_DATABASE_NAME, tableName);
     assertPartitionDirectories(desc);
 
     res = executeString(
-        "select distinct * from " + tableName + " where (key = 45.0 or key = 38.0) and null_col is null");
+      "select distinct * from " + tableName + " where (key = 45.0 or key = 38.0) and null_col is null");
 
     Map<Double, int []> resultRows1 = Maps.newHashMap();
     resultRows1.put(45.0d, new int[]{3, 2});
@@ -220,14 +219,14 @@ public class TestTablePartitions extends QueryTestCaseBase {
   public final void testQueryCasesOnColumnPartitionedTable() throws Exception {
     String tableName = CatalogUtil.normalizeIdentifier("testQueryCasesOnColumnPartitionedTable");
     ResultSet res = executeString(
-        "create table " + tableName + " (col1 int4, col2 int4, null_col int4) partition by column(key float8) ");
+      "create table " + tableName + " (col1 int4, col2 int4, null_col int4) partition by column(key float8) ");
     res.close();
 
     assertTrue(catalog.existsTable(DEFAULT_DATABASE_NAME, tableName));
 
     res = executeString(
-        "insert overwrite into " + tableName
-            + " (col1, col2, key) select l_orderkey, l_partkey, l_quantity from lineitem");
+      "insert overwrite into " + tableName
+        + " (col1, col2, key) select l_orderkey, l_partkey, l_quantity from lineitem");
     res.close();
 
     TableDesc desc = catalog.getTableDesc(DEFAULT_DATABASE_NAME, tableName);
@@ -300,14 +299,14 @@ public class TestTablePartitions extends QueryTestCaseBase {
   public final void testColumnPartitionedTableByThreeColumns() throws Exception {
     String tableName = CatalogUtil.normalizeIdentifier("testColumnPartitionedTableByThreeColumns");
     ResultSet res = testBase.execute(
-        "create table " + tableName + " (col4 text) partition by column(col1 int4, col2 int4, col3 float8) ");
+      "create table " + tableName + " (col4 text) partition by column(col1 int4, col2 int4, col3 float8) ");
     res.close();
     TajoTestingCluster cluster = testBase.getTestingCluster();
     CatalogService catalog = cluster.getMaster().getCatalog();
     assertTrue(catalog.existsTable(DEFAULT_DATABASE_NAME, tableName));
 
     res = executeString("insert overwrite into " + tableName
-        + " select l_returnflag, l_orderkey, l_partkey, l_quantity from lineitem");
+      + " select l_returnflag, l_orderkey, l_partkey, l_quantity from lineitem");
     res.close();
 
     TableDesc desc = catalog.getTableDesc(DEFAULT_DATABASE_NAME, tableName);
@@ -364,14 +363,14 @@ public class TestTablePartitions extends QueryTestCaseBase {
   public final void testInsertIntoColumnPartitionedTableByThreeColumns() throws Exception {
     String tableName = CatalogUtil.normalizeIdentifier("testInsertIntoColumnPartitionedTableByThreeColumns");
     ResultSet res = testBase.execute(
-        "create table " + tableName + " (col4 text) partition by column(col1 int4, col2 int4, col3 float8) ");
+      "create table " + tableName + " (col4 text) partition by column(col1 int4, col2 int4, col3 float8) ");
     res.close();
     TajoTestingCluster cluster = testBase.getTestingCluster();
     CatalogService catalog = cluster.getMaster().getCatalog();
     assertTrue(catalog.existsTable(DEFAULT_DATABASE_NAME, tableName));
 
     res = executeString("insert into " + tableName
-        + " select l_returnflag, l_orderkey, l_partkey, l_quantity from lineitem");
+      + " select l_returnflag, l_orderkey, l_partkey, l_quantity from lineitem");
     res.close();
 
     TableDesc desc = catalog.getTableDesc(DEFAULT_DATABASE_NAME, tableName);
@@ -424,7 +423,7 @@ public class TestTablePartitions extends QueryTestCaseBase {
 
     // insert into already exists partitioned table
     res = executeString("insert into " + tableName
-        + " select l_returnflag, l_orderkey, l_partkey, l_quantity from lineitem");
+      + " select l_returnflag, l_orderkey, l_partkey, l_quantity from lineitem");
     res.close();
 
     desc = catalog.getTableDesc(DEFAULT_DATABASE_NAME, tableName);
@@ -447,15 +446,15 @@ public class TestTablePartitions extends QueryTestCaseBase {
       assertEquals(5, desc.getStats().getNumRows().intValue());
     }
     String expected = "N\n" +
-        "N\n" +
-        "N\n" +
-        "N\n" +
-        "N\n" +
-        "N\n" +
-        "R\n" +
-        "R\n" +
-        "R\n" +
-        "R\n";
+      "N\n" +
+      "N\n" +
+      "N\n" +
+      "N\n" +
+      "N\n" +
+      "R\n" +
+      "R\n" +
+      "R\n" +
+      "R\n";
 
     String tableData = getTableFileContents(new Path(desc.getPath()));
     assertEquals(expected, tableData);
@@ -464,30 +463,30 @@ public class TestTablePartitions extends QueryTestCaseBase {
     String resultSetData = resultSetToString(res);
     res.close();
     expected = "col4,col1,col2,col3\n" +
-        "-------------------------------\n" +
-        "N,2,2,38.0\n" +
-        "N,2,2,38.0\n" +
-        "R,3,2,45.0\n" +
-        "R,3,2,45.0\n";
+      "-------------------------------\n" +
+      "N,2,2,38.0\n" +
+      "N,2,2,38.0\n" +
+      "R,3,2,45.0\n" +
+      "R,3,2,45.0\n";
     assertEquals(expected, resultSetData);
 
     res = executeString("select * from " + tableName + " where (col1 = 2 or col1 = 3) and col2 >= 2");
     resultSetData = resultSetToString(res);
     res.close();
     expected = "col4,col1,col2,col3\n" +
-        "-------------------------------\n" +
-        "N,2,2,38.0\n" +
-        "N,2,2,38.0\n" +
-        "R,3,2,45.0\n" +
-        "R,3,2,45.0\n" +
-        "R,3,3,49.0\n" +
-        "R,3,3,49.0\n";
+      "-------------------------------\n" +
+      "N,2,2,38.0\n" +
+      "N,2,2,38.0\n" +
+      "R,3,2,45.0\n" +
+      "R,3,2,45.0\n" +
+      "R,3,3,49.0\n" +
+      "R,3,3,49.0\n";
     assertEquals(expected, resultSetData);
 
     // Check not to remove existing partition directories.
     res = executeString("insert overwrite into " + tableName
-        + " select l_returnflag, l_orderkey, l_partkey, 30.0 as l_quantity from lineitem "
-        + " where l_orderkey = 1 and l_partkey = 1 and  l_linenumber = 1");
+      + " select l_returnflag, l_orderkey, l_partkey, 30.0 as l_quantity from lineitem "
+      + " where l_orderkey = 1 and l_partkey = 1 and  l_linenumber = 1");
     res.close();
 
     desc = catalog.getTableDesc(DEFAULT_DATABASE_NAME, tableName);
@@ -514,28 +513,42 @@ public class TestTablePartitions extends QueryTestCaseBase {
     resultSetData = resultSetToString(res);
     res.close();
     expected = "col4,col1,col2,col3\n" +
-        "-------------------------------\n" +
-        "N,1,1,17.0\n" +
-        "N,1,1,17.0\n" +
-        "N,1,1,30.0\n" +
-        "N,1,1,36.0\n" +
-        "N,1,1,36.0\n";
+      "-------------------------------\n" +
+      "N,1,1,17.0\n" +
+      "N,1,1,17.0\n" +
+      "N,1,1,30.0\n" +
+      "N,1,1,36.0\n" +
+      "N,1,1,36.0\n";
 
     assertEquals(expected, resultSetData);
+
+    // insert overwrite empty result to partitioned table
+    res = executeString("insert overwrite into " + tableName
+      + " select l_returnflag, l_orderkey, l_partkey, l_quantity from lineitem where l_orderkey" +
+      " > 100");
+    res.close();
+
+    desc = catalog.getTableDesc(DEFAULT_DATABASE_NAME, tableName);
+
+    ContentSummary summary = fs.getContentSummary(new Path(desc.getPath()));
+
+    assertEquals(summary.getDirectoryCount(), 1L);
+    assertEquals(summary.getFileCount(), 0L);
+    assertEquals(summary.getLength(), 0L);
   }
 
   @Test
   public final void testColumnPartitionedTableByOneColumnsWithCompression() throws Exception {
     String tableName = CatalogUtil.normalizeIdentifier("testColumnPartitionedTableByOneColumnsWithCompression");
     ResultSet res = executeString(
-        "create table " + tableName + " (col2 int4, col3 float8) USING csv " +
-            "WITH ('csvfile.delimiter'='|','compression.codec'='org.apache.hadoop.io.compress.DeflateCodec') " +
-            "PARTITION BY column(col1 int4)");
+      "create table " + tableName + " (col2 int4, col3 float8) USING csv " +
+        "WITH ('csvfile.delimiter'='|','compression.codec'='org.apache.hadoop.io.compress.DeflateCodec') " +
+        "PARTITION BY column(col1 int4)");
     res.close();
     assertTrue(catalog.existsTable(DEFAULT_DATABASE_NAME, tableName));
 
     res = executeString(
-        "insert overwrite into " + tableName + " select l_partkey, l_quantity, l_orderkey from lineitem");
+      "insert overwrite into " + tableName + " select l_partkey, l_quantity, l_orderkey from lineitem");
     res.close();
     TableDesc desc = catalog.getTableDesc(DEFAULT_DATABASE_NAME, tableName);
     if (!testingCluster.isHCatalogStoreRunning()) {
@@ -564,15 +577,15 @@ public class TestTablePartitions extends QueryTestCaseBase {
   public final void testColumnPartitionedTableByTwoColumnsWithCompression() throws Exception {
     String tableName = CatalogUtil.normalizeIdentifier("testColumnPartitionedTableByTwoColumnsWithCompression");
     ResultSet res = executeString("create table " + tableName + " (col3 float8, col4 text) USING csv " +
-        "WITH ('csvfile.delimiter'='|','compression.codec'='org.apache.hadoop.io.compress.DeflateCodec') " +
-        "PARTITION by column(col1 int4, col2 int4)");
+      "WITH ('csvfile.delimiter'='|','compression.codec'='org.apache.hadoop.io.compress.DeflateCodec') " +
+      "PARTITION by column(col1 int4, col2 int4)");
     res.close();
 
     assertTrue(catalog.existsTable(DEFAULT_DATABASE_NAME, tableName));
 
     res = executeString(
-        "insert overwrite into " + tableName +
-            " select  l_quantity, l_returnflag, l_orderkey, l_partkey from lineitem");
+      "insert overwrite into " + tableName +
+        " select  l_quantity, l_returnflag, l_orderkey, l_partkey from lineitem");
     res.close();
     TableDesc desc = catalog.getTableDesc(DEFAULT_DATABASE_NAME, tableName);
     if (!testingCluster.isHCatalogStoreRunning()) {
@@ -608,16 +621,16 @@ public class TestTablePartitions extends QueryTestCaseBase {
   public final void testColumnPartitionedTableByThreeColumnsWithCompression() throws Exception {
     String tableName = CatalogUtil.normalizeIdentifier("testColumnPartitionedTableByThreeColumnsWithCompression");
     ResultSet res = executeString(
-        "create table " + tableName + " (col4 text) USING csv " +
-            "WITH ('csvfile.delimiter'='|','compression.codec'='org.apache.hadoop.io.compress.DeflateCodec') " +
-            "partition by column(col1 int4, col2 int4, col3 float8)");
+      "create table " + tableName + " (col4 text) USING csv " +
+        "WITH ('csvfile.delimiter'='|','compression.codec'='org.apache.hadoop.io.compress.DeflateCodec') " +
+        "partition by column(col1 int4, col2 int4, col3 float8)");
     res.close();
 
     assertTrue(catalog.existsTable(DEFAULT_DATABASE_NAME, tableName));
 
     res = executeString(
-        "insert overwrite into " + tableName +
-            " select l_returnflag, l_orderkey, l_partkey, l_quantity from lineitem");
+      "insert overwrite into " + tableName +
+        " select l_returnflag, l_orderkey, l_partkey, l_quantity from lineitem");
     res.close();
     TableDesc desc = catalog.getTableDesc(DEFAULT_DATABASE_NAME, tableName);
     if (!testingCluster.isHCatalogStoreRunning()) {
@@ -691,16 +704,16 @@ public class TestTablePartitions extends QueryTestCaseBase {
   public final void testColumnPartitionedTableNoMatchedPartition() throws Exception {
     String tableName = CatalogUtil.normalizeIdentifier("testColumnPartitionedTableNoMatchedPartition");
     ResultSet res = executeString(
-        "create table " + tableName + " (col4 text) USING csv " +
-            "WITH ('csvfile.delimiter'='|','compression.codec'='org.apache.hadoop.io.compress.DeflateCodec') " +
-            "partition by column(col1 int4, col2 int4, col3 float8)");
+      "create table " + tableName + " (col4 text) USING csv " +
+        "WITH ('csvfile.delimiter'='|','compression.codec'='org.apache.hadoop.io.compress.DeflateCodec') " +
+        "partition by column(col1 int4, col2 int4, col3 float8)");
     res.close();
 
     assertTrue(catalog.existsTable(DEFAULT_DATABASE_NAME, tableName));
 
     res = executeString(
-        "insert overwrite into " + tableName +
-            " select l_returnflag , l_orderkey, l_partkey, l_quantity from lineitem");
+      "insert overwrite into " + tableName +
+        " select l_returnflag , l_orderkey, l_partkey, l_quantity from lineitem");
     res.close();
     TableDesc desc = catalog.getTableDesc(DEFAULT_DATABASE_NAME, tableName);
     if (!testingCluster.isHCatalogStoreRunning()) {
@@ -747,13 +760,13 @@ public class TestTablePartitions extends QueryTestCaseBase {
   public final void testColumnPartitionedTableWithSmallerExpressions1() throws Exception {
     String tableName = CatalogUtil.normalizeIdentifier("testColumnPartitionedTableWithSmallerExpressions1");
     ResultSet res = executeString(
-        "create table " + tableName + " (col1 int4, col2 int4, null_col int4) partition by column(key float8) ");
+      "create table " + tableName + " (col1 int4, col2 int4, null_col int4) partition by column(key float8) ");
     res.close();
 
     assertTrue(catalog.existsTable(DEFAULT_DATABASE_NAME, tableName));
 
     ClientProtos.SubmitQueryResponse response = client.executeQuery("insert overwrite into " + tableName
-        + " select l_orderkey, l_partkey from lineitem");
+      + " select l_orderkey, l_partkey from lineitem");
 
     assertTrue(response.hasErrorMessage());
     assertEquals(response.getErrorMessage(), "INSERT has smaller expressions than target columns\n");
@@ -767,13 +780,13 @@ public class TestTablePartitions extends QueryTestCaseBase {
   public final void testColumnPartitionedTableWithSmallerExpressions2() throws Exception {
     String tableName = CatalogUtil.normalizeIdentifier("testColumnPartitionedTableWithSmallerExpressions2");
     ResultSet res = executeString(
-        "create table " + tableName + " (col1 int4, col2 int4, null_col int4) partition by column(key float8) ");
+      "create table " + tableName + " (col1 int4, col2 int4, null_col int4) partition by column(key float8) ");
     res.close();
 
     assertTrue(catalog.existsTable(DEFAULT_DATABASE_NAME, tableName));
 
     ClientProtos.SubmitQueryResponse response = client.executeQuery("insert overwrite into " + tableName
-        + " select l_returnflag , l_orderkey, l_partkey from lineitem");
+      + " select l_returnflag , l_orderkey, l_partkey from lineitem");
 
     assertTrue(response.hasErrorMessage());
     assertEquals(response.getErrorMessage(), "INSERT has smaller expressions than target columns\n");
@@ -792,11 +805,11 @@ public class TestTablePartitions extends QueryTestCaseBase {
     res.close();
 
     res = executeString("create table testinsertquery1.table1 " +
-        "(col1 int4, col2 int4, col3 float8)");
+      "(col1 int4, col2 int4, col3 float8)");
     res.close();
 
     res = executeString("create table testinsertquery2.table1 " +
-        "(col1 int4, col2 int4, col3 float8)");
+      "(col1 int4, col2 int4, col3 float8)");
     res.close();
 
     CatalogService catalog = testingCluster.getMaster().getCatalog();
@@ -804,7 +817,7 @@ public class TestTablePartitions extends QueryTestCaseBase {
     assertTrue(catalog.existsTable("testinsertquery2", "table1"));
 
     res = executeString("insert overwrite into testinsertquery1.table1 " +
-        "select l_orderkey, l_partkey, l_quantity from default.lineitem;");
+      "select l_orderkey, l_partkey, l_quantity from default.lineitem;");
     res.close();
 
     TableDesc desc = catalog.getTableDesc("testinsertquery1", "table1");
@@ -813,7 +826,7 @@ public class TestTablePartitions extends QueryTestCaseBase {
     }
 
     res = executeString("insert overwrite into testinsertquery2.table1 " +
-        "select col1, col2, col3 from testinsertquery1.table1;");
+      "select col1, col2, col3 from testinsertquery1.table1;");
     res.close();
 
     desc = catalog.getTableDesc("testinsertquery2", "table1");
@@ -822,39 +835,6 @@ public class TestTablePartitions extends QueryTestCaseBase {
     }
   }
 
-  @Test
-  public final void testColumnPartitionedTableWithSmallerExpressions5() throws Exception {
-    String tableName = CatalogUtil.normalizeIdentifier("testColumnPartitionedTableWithSmallerExpressions5");
-    ResultSet res = executeString(
-        "create table " + tableName + " (col1 text) partition by column(col2 text) ");
-    res.close();
-
-    assertTrue(catalog.existsTable(DEFAULT_DATABASE_NAME, tableName));
-
-    res = executeString("insert overwrite into " + tableName + "(col1) select l_returnflag from lineitem");
-    res.close();
-    res = executeString("select * from " + tableName);
-    assertResultSet(res);
-    res.close();
-  }
-
-  @Test
-  public final void testColumnPartitionedTableWithSmallerExpressions6() throws Exception {
-    String tableName = CatalogUtil.normalizeIdentifier("testColumnPartitionedTableWithSmallerExpressions6");
-    ResultSet res = executeString(
-        "create table " + tableName + " (col1 text) partition by column(col2 text) ");
-    res.close();
-
-    assertTrue(catalog.existsTable(DEFAULT_DATABASE_NAME, tableName));
-
-    res = executeString(
-        "insert overwrite into " + tableName + "(col1) select l_returnflag from lineitem where l_orderkey = 1");
-    res.close();
-    res = executeString("select * from " + tableName);
-    assertResultSet(res);
-    res.close();
-  }
-
   private MasterPlan getQueryPlan(ResultSet res) {
     QueryId queryId = ((TajoResultSet)res).getQueryId();
     for (TajoWorker eachWorker: testingCluster.getTajoWorkers()) {
@@ -885,8 +865,8 @@ public class TestTablePartitions extends QueryTestCaseBase {
       int totalBytes = 0;
       Random rand = new Random(System.currentTimeMillis());
       String col2Data = "Column-2Column-2Column-2Column-2Column-2Column-2Column-2Column-2Column-2Column-2Column-2" +
-          "Column-2Column-2Column-2Column-2Column-2Column-2Column-2Column-2Column-2Column-2Column-2" +
-          "Column-2Column-2Column-2Column-2Column-2Column-2Column-2Column-2Column-2Column-2Column-2";
+        "Column-2Column-2Column-2Column-2Column-2Column-2Column-2Column-2Column-2Column-2Column-2" +
+        "Column-2Column-2Column-2Column-2Column-2Column-2Column-2Column-2Column-2Column-2Column-2";
 
       int index = 0;
       while(true) {
@@ -921,9 +901,9 @@ public class TestTablePartitions extends QueryTestCaseBase {
 
     } finally {
       testingCluster.setAllTajoDaemonConfValue(TajoConf.ConfVars.$DIST_QUERY_TABLE_PARTITION_VOLUME.varname,
-          TajoConf.ConfVars.$DIST_QUERY_TABLE_PARTITION_VOLUME.defaultVal);
+        TajoConf.ConfVars.$DIST_QUERY_TABLE_PARTITION_VOLUME.defaultVal);
       testingCluster.setAllTajoDaemonConfValue(TajoConf.ConfVars.SHUFFLE_HASH_APPENDER_PAGE_VOLUME.varname,
-          TajoConf.ConfVars.SHUFFLE_HASH_APPENDER_PAGE_VOLUME.defaultVal);
+        TajoConf.ConfVars.SHUFFLE_HASH_APPENDER_PAGE_VOLUME.defaultVal);
       executeString("DROP TABLE test_partition PURGE").close();
       executeString("DROP TABLE testScatteredHashShuffle PURGE").close();
     }
@@ -936,17 +916,17 @@ public class TestTablePartitions extends QueryTestCaseBase {
     executeDDL("lineitemspecial_ddl.sql", "lineitemspecial.tbl");
 
     executeString("CREATE TABLE IF NOT EXISTS pTable947 (id int, name text) PARTITION BY COLUMN (type text)")
-        .close();
+      .close();
     executeString("INSERT OVERWRITE INTO pTable947 SELECT l_orderkey, l_shipinstruct, l_shipmode FROM lineitemspecial")
-        .close();
+      .close();
     ResultSet res = executeString("select * from pTable947 where type='RA:*?><I/L#%S' or type='AIR'");
 
     String resStr = resultSetToString(res);
     String expected =
-        "id,name,type\n" +
-            "-------------------------------\n"
-            + "3,NONE,AIR\n"
-            + "3,TEST SPECIAL CHARS,RA:*?><I/L#%S\n";
+      "id,name,type\n" +
+        "-------------------------------\n"
+        + "3,NONE,AIR\n"
+        + "3,TEST SPECIAL CHARS,RA:*?><I/L#%S\n";
 
     assertEquals(expected, resStr);
     cleanupQuery(res);
@@ -959,9 +939,9 @@ public class TestTablePartitions extends QueryTestCaseBase {
     executeDDL("lineitemspecial_ddl.sql", "lineitemspecial.tbl");
 
     executeString("CREATE TABLE IF NOT EXISTS pTable948 (id int, name text) PARTITION BY COLUMN (type text)")
-        .close();
+      .close();
     executeString("INSERT OVERWRITE INTO pTable948 SELECT l_orderkey, l_shipinstruct, l_shipmode FROM lineitemspecial")
-        .close();
+      .close();
 
     ResultSet res = executeString("select * from pTable948 where type='RA:*?><I/L#%S'");
     assertResultSet(res);
@@ -971,25 +951,4 @@ public class TestTablePartitions extends QueryTestCaseBase {
     assertResultSet(res);
     cleanupQuery(res);
   }
-
-  @Test
-  public final void testIgnoreFilesInIntermediateDir() throws Exception {
-    // See - TAJO-1219: Files located in intermediate directories of partitioned table should be ignored
-    // It verifies that Tajo ignores files located in intermediate directories of partitioned table.
-
-    Path testDir = CommonTestingUtil.getTestDir();
-
-    executeString(
-        "CREATE EXTERNAL TABLE testIgnoreFilesInIntermediateDir (col1 int) USING CSV PARTITION BY COLUMN (col2 text) " +
-        "LOCATION '" + testDir + "'");
-
-    FileSystem fs = testDir.getFileSystem(conf);
-    FSDataOutputStream fos = fs.create(new Path(testDir, "table1.data"));
-    fos.write("a|b|c".getBytes());
-    fos.close();
-
-    ResultSet res = executeString("select * from testIgnoreFilesInIntermediateDir;");
-    assertFalse(res.next());
-    res.close();
-  }
 }


[3/4] tajo git commit: TAJO-1194: 'INSERT OVERWRITE .. SELECT' does not remove existing data when result is empty. (missing code)

Posted by ji...@apache.org.
TAJO-1194: 'INSERT OVERWRITE .. SELECT' does not remove existing data when result is empty. (missing code)
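
This follow-up restores the test code verifying the empty-result case. A condensed sketch of that verification, mirroring the ContentSummary assertions in the TestTablePartitions hunks (variable names follow the test; it is not a complete test method):

    // insert overwrite with an empty result into an already populated partitioned table
    executeString("insert overwrite into " + tableName
      + " select l_returnflag, l_orderkey, l_partkey, l_quantity from lineitem where l_orderkey > 100").close();

    // the previous contents must be gone: only the (empty) table directory remains
    desc = catalog.getTableDesc(DEFAULT_DATABASE_NAME, tableName);
    ContentSummary summary = fs.getContentSummary(new Path(desc.getPath()));
    assertEquals(summary.getDirectoryCount(), 1L);
    assertEquals(summary.getFileCount(), 0L);
    assertEquals(summary.getLength(), 0L);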


Project: http://git-wip-us.apache.org/repos/asf/tajo/repo
Commit: http://git-wip-us.apache.org/repos/asf/tajo/commit/2a69bcc2
Tree: http://git-wip-us.apache.org/repos/asf/tajo/tree/2a69bcc2
Diff: http://git-wip-us.apache.org/repos/asf/tajo/diff/2a69bcc2

Branch: refs/heads/index_support
Commit: 2a69bcc218a53eced0cdb9217de1fdd1837c0827
Parents: f6da07b
Author: JaeHwa Jung <bl...@apache.org>
Authored: Thu Dec 4 18:54:03 2014 +0900
Committer: JaeHwa Jung <bl...@apache.org>
Committed: Thu Dec 4 18:54:03 2014 +0900

----------------------------------------------------------------------
 .../tajo/engine/query/TestTablePartitions.java  | 232 ++++++++++++-------
 1 file changed, 142 insertions(+), 90 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/tajo/blob/2a69bcc2/tajo-core/src/test/java/org/apache/tajo/engine/query/TestTablePartitions.java
----------------------------------------------------------------------
diff --git a/tajo-core/src/test/java/org/apache/tajo/engine/query/TestTablePartitions.java b/tajo-core/src/test/java/org/apache/tajo/engine/query/TestTablePartitions.java
index 15cbde0..b1e1bec 100644
--- a/tajo-core/src/test/java/org/apache/tajo/engine/query/TestTablePartitions.java
+++ b/tajo-core/src/test/java/org/apache/tajo/engine/query/TestTablePartitions.java
@@ -19,10 +19,7 @@
 package org.apache.tajo.engine.query;
 
 import com.google.common.collect.Maps;
-import org.apache.hadoop.fs.ContentSummary;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.*;
 import org.apache.hadoop.io.compress.CompressionCodec;
 import org.apache.hadoop.io.compress.CompressionCodecFactory;
 import org.apache.hadoop.io.compress.DeflateCodec;
@@ -44,6 +41,7 @@ import org.apache.tajo.jdbc.TajoResultSet;
 import org.apache.tajo.master.querymaster.QueryMasterTask;
 import org.apache.tajo.plan.logical.NodeType;
 import org.apache.tajo.storage.StorageConstants;
+import org.apache.tajo.util.CommonTestingUtil;
 import org.apache.tajo.util.KeyValueSet;
 import org.apache.tajo.worker.TajoWorker;
 import org.junit.Test;
@@ -70,7 +68,7 @@ public class TestTablePartitions extends QueryTestCaseBase {
   public final void testCreateColumnPartitionedTable() throws Exception {
     String tableName = CatalogUtil.normalizeIdentifier("testCreateColumnPartitionedTable");
     ResultSet res = executeString(
-      "create table " + tableName + " (col1 int4, col2 int4) partition by column(key float8) ");
+        "create table " + tableName + " (col1 int4, col2 int4) partition by column(key float8) ");
     res.close();
 
     assertTrue(catalog.existsTable(DEFAULT_DATABASE_NAME, tableName));
@@ -78,8 +76,8 @@ public class TestTablePartitions extends QueryTestCaseBase {
     assertEquals(3, catalog.getTableDesc(DEFAULT_DATABASE_NAME, tableName).getLogicalSchema().size());
 
     res = testBase.execute(
-      "insert overwrite into " + tableName + " select l_orderkey, l_partkey, " +
-        "l_quantity from lineitem");
+        "insert overwrite into " + tableName + " select l_orderkey, l_partkey, " +
+            "l_quantity from lineitem");
 
     MasterPlan plan = getQueryPlan(res);
     ExecutionBlock rootEB = plan.getRoot();
@@ -113,7 +111,7 @@ public class TestTablePartitions extends QueryTestCaseBase {
   public final void testCreateColumnPartitionedTableWithJoin() throws Exception {
     String tableName = CatalogUtil.normalizeIdentifier("testCreateColumnPartitionedTableWithJoin");
     ResultSet res = executeString(
-      "create table " + tableName + " (col1 int4, col2 int4) partition by column(key float8) ");
+        "create table " + tableName + " (col1 int4, col2 int4) partition by column(key float8) ");
     res.close();
 
     assertTrue(catalog.existsTable(DEFAULT_DATABASE_NAME, tableName));
@@ -121,8 +119,8 @@ public class TestTablePartitions extends QueryTestCaseBase {
     assertEquals(3, catalog.getTableDesc(DEFAULT_DATABASE_NAME, tableName).getLogicalSchema().size());
 
     res = testBase.execute(
-      "insert overwrite into " + tableName + " select l_orderkey, l_partkey, " +
-        "l_quantity from lineitem join orders on l_orderkey = o_orderkey");
+        "insert overwrite into " + tableName + " select l_orderkey, l_partkey, " +
+            "l_quantity from lineitem join orders on l_orderkey = o_orderkey");
 
     MasterPlan plan = getQueryPlan(res);
     ExecutionBlock rootEB = plan.getRoot();
@@ -158,7 +156,7 @@ public class TestTablePartitions extends QueryTestCaseBase {
   public final void testCreateColumnPartitionedTableWithSelectedColumns() throws Exception {
     String tableName = CatalogUtil.normalizeIdentifier("testCreateColumnPartitionedTableWithSelectedColumns");
     ResultSet res = executeString(
-      "create table " + tableName + " (col1 int4, col2 int4, null_col int4) partition by column(key float8) ");
+        "create table " + tableName + " (col1 int4, col2 int4, null_col int4) partition by column(key float8) ");
     res.close();
 
     assertTrue(catalog.existsTable(DEFAULT_DATABASE_NAME, tableName));
@@ -166,7 +164,7 @@ public class TestTablePartitions extends QueryTestCaseBase {
     assertEquals(4, catalog.getTableDesc(DEFAULT_DATABASE_NAME, tableName).getLogicalSchema().size());
 
     res = executeString("insert overwrite into " + tableName + " (col1, col2, key) select l_orderkey, " +
-      "l_partkey, l_quantity from lineitem");
+        "l_partkey, l_quantity from lineitem");
     res.close();
   }
 
@@ -174,20 +172,20 @@ public class TestTablePartitions extends QueryTestCaseBase {
   public final void testColumnPartitionedTableByOneColumn() throws Exception {
     String tableName = CatalogUtil.normalizeIdentifier("testColumnPartitionedTableByOneColumn");
     ResultSet res = executeString(
-      "create table " + tableName + " (col1 int4, col2 int4, null_col int4) partition by column(key float8) ");
+        "create table " + tableName + " (col1 int4, col2 int4, null_col int4) partition by column(key float8) ");
     res.close();
 
     assertTrue(catalog.existsTable(DEFAULT_DATABASE_NAME, tableName));
 
     res = executeString("insert overwrite into " + tableName
-      + " (col1, col2, key) select l_orderkey, l_partkey, l_quantity from lineitem");
+        + " (col1, col2, key) select l_orderkey, l_partkey, l_quantity from lineitem");
     res.close();
 
     TableDesc desc = catalog.getTableDesc(DEFAULT_DATABASE_NAME, tableName);
     assertPartitionDirectories(desc);
 
     res = executeString(
-      "select distinct * from " + tableName + " where (key = 45.0 or key = 38.0) and null_col is null");
+        "select distinct * from " + tableName + " where (key = 45.0 or key = 38.0) and null_col is null");
 
     Map<Double, int []> resultRows1 = Maps.newHashMap();
     resultRows1.put(45.0d, new int[]{3, 2});
@@ -219,14 +217,14 @@ public class TestTablePartitions extends QueryTestCaseBase {
   public final void testQueryCasesOnColumnPartitionedTable() throws Exception {
     String tableName = CatalogUtil.normalizeIdentifier("testQueryCasesOnColumnPartitionedTable");
     ResultSet res = executeString(
-      "create table " + tableName + " (col1 int4, col2 int4, null_col int4) partition by column(key float8) ");
+        "create table " + tableName + " (col1 int4, col2 int4, null_col int4) partition by column(key float8) ");
     res.close();
 
     assertTrue(catalog.existsTable(DEFAULT_DATABASE_NAME, tableName));
 
     res = executeString(
-      "insert overwrite into " + tableName
-        + " (col1, col2, key) select l_orderkey, l_partkey, l_quantity from lineitem");
+        "insert overwrite into " + tableName
+            + " (col1, col2, key) select l_orderkey, l_partkey, l_quantity from lineitem");
     res.close();
 
     TableDesc desc = catalog.getTableDesc(DEFAULT_DATABASE_NAME, tableName);
@@ -299,14 +297,14 @@ public class TestTablePartitions extends QueryTestCaseBase {
   public final void testColumnPartitionedTableByThreeColumns() throws Exception {
     String tableName = CatalogUtil.normalizeIdentifier("testColumnPartitionedTableByThreeColumns");
     ResultSet res = testBase.execute(
-      "create table " + tableName + " (col4 text) partition by column(col1 int4, col2 int4, col3 float8) ");
+        "create table " + tableName + " (col4 text) partition by column(col1 int4, col2 int4, col3 float8) ");
     res.close();
     TajoTestingCluster cluster = testBase.getTestingCluster();
     CatalogService catalog = cluster.getMaster().getCatalog();
     assertTrue(catalog.existsTable(DEFAULT_DATABASE_NAME, tableName));
 
     res = executeString("insert overwrite into " + tableName
-      + " select l_returnflag, l_orderkey, l_partkey, l_quantity from lineitem");
+        + " select l_returnflag, l_orderkey, l_partkey, l_quantity from lineitem");
     res.close();
 
     TableDesc desc = catalog.getTableDesc(DEFAULT_DATABASE_NAME, tableName);
@@ -363,14 +361,14 @@ public class TestTablePartitions extends QueryTestCaseBase {
   public final void testInsertIntoColumnPartitionedTableByThreeColumns() throws Exception {
     String tableName = CatalogUtil.normalizeIdentifier("testInsertIntoColumnPartitionedTableByThreeColumns");
     ResultSet res = testBase.execute(
-      "create table " + tableName + " (col4 text) partition by column(col1 int4, col2 int4, col3 float8) ");
+        "create table " + tableName + " (col4 text) partition by column(col1 int4, col2 int4, col3 float8) ");
     res.close();
     TajoTestingCluster cluster = testBase.getTestingCluster();
     CatalogService catalog = cluster.getMaster().getCatalog();
     assertTrue(catalog.existsTable(DEFAULT_DATABASE_NAME, tableName));
 
     res = executeString("insert into " + tableName
-      + " select l_returnflag, l_orderkey, l_partkey, l_quantity from lineitem");
+        + " select l_returnflag, l_orderkey, l_partkey, l_quantity from lineitem");
     res.close();
 
     TableDesc desc = catalog.getTableDesc(DEFAULT_DATABASE_NAME, tableName);
@@ -423,7 +421,7 @@ public class TestTablePartitions extends QueryTestCaseBase {
 
     // insert into already exists partitioned table
     res = executeString("insert into " + tableName
-      + " select l_returnflag, l_orderkey, l_partkey, l_quantity from lineitem");
+        + " select l_returnflag, l_orderkey, l_partkey, l_quantity from lineitem");
     res.close();
 
     desc = catalog.getTableDesc(DEFAULT_DATABASE_NAME, tableName);
@@ -446,15 +444,15 @@ public class TestTablePartitions extends QueryTestCaseBase {
       assertEquals(5, desc.getStats().getNumRows().intValue());
     }
     String expected = "N\n" +
-      "N\n" +
-      "N\n" +
-      "N\n" +
-      "N\n" +
-      "N\n" +
-      "R\n" +
-      "R\n" +
-      "R\n" +
-      "R\n";
+        "N\n" +
+        "N\n" +
+        "N\n" +
+        "N\n" +
+        "N\n" +
+        "R\n" +
+        "R\n" +
+        "R\n" +
+        "R\n";
 
     String tableData = getTableFileContents(new Path(desc.getPath()));
     assertEquals(expected, tableData);
@@ -463,30 +461,30 @@ public class TestTablePartitions extends QueryTestCaseBase {
     String resultSetData = resultSetToString(res);
     res.close();
     expected = "col4,col1,col2,col3\n" +
-      "-------------------------------\n" +
-      "N,2,2,38.0\n" +
-      "N,2,2,38.0\n" +
-      "R,3,2,45.0\n" +
-      "R,3,2,45.0\n";
+        "-------------------------------\n" +
+        "N,2,2,38.0\n" +
+        "N,2,2,38.0\n" +
+        "R,3,2,45.0\n" +
+        "R,3,2,45.0\n";
     assertEquals(expected, resultSetData);
 
     res = executeString("select * from " + tableName + " where (col1 = 2 or col1 = 3) and col2 >= 2");
     resultSetData = resultSetToString(res);
     res.close();
     expected = "col4,col1,col2,col3\n" +
-      "-------------------------------\n" +
-      "N,2,2,38.0\n" +
-      "N,2,2,38.0\n" +
-      "R,3,2,45.0\n" +
-      "R,3,2,45.0\n" +
-      "R,3,3,49.0\n" +
-      "R,3,3,49.0\n";
+        "-------------------------------\n" +
+        "N,2,2,38.0\n" +
+        "N,2,2,38.0\n" +
+        "R,3,2,45.0\n" +
+        "R,3,2,45.0\n" +
+        "R,3,3,49.0\n" +
+        "R,3,3,49.0\n";
     assertEquals(expected, resultSetData);
 
     // Check not to remove existing partition directories.
     res = executeString("insert overwrite into " + tableName
-      + " select l_returnflag, l_orderkey, l_partkey, 30.0 as l_quantity from lineitem "
-      + " where l_orderkey = 1 and l_partkey = 1 and  l_linenumber = 1");
+        + " select l_returnflag, l_orderkey, l_partkey, 30.0 as l_quantity from lineitem "
+        + " where l_orderkey = 1 and l_partkey = 1 and  l_linenumber = 1");
     res.close();
 
     desc = catalog.getTableDesc(DEFAULT_DATABASE_NAME, tableName);
@@ -513,12 +511,12 @@ public class TestTablePartitions extends QueryTestCaseBase {
     resultSetData = resultSetToString(res);
     res.close();
     expected = "col4,col1,col2,col3\n" +
-      "-------------------------------\n" +
-      "N,1,1,17.0\n" +
-      "N,1,1,17.0\n" +
-      "N,1,1,30.0\n" +
-      "N,1,1,36.0\n" +
-      "N,1,1,36.0\n";
+        "-------------------------------\n" +
+        "N,1,1,17.0\n" +
+        "N,1,1,17.0\n" +
+        "N,1,1,30.0\n" +
+        "N,1,1,36.0\n" +
+        "N,1,1,36.0\n";
 
     assertEquals(expected, resultSetData);
 
@@ -541,14 +539,14 @@ public class TestTablePartitions extends QueryTestCaseBase {
   public final void testColumnPartitionedTableByOneColumnsWithCompression() throws Exception {
     String tableName = CatalogUtil.normalizeIdentifier("testColumnPartitionedTableByOneColumnsWithCompression");
     ResultSet res = executeString(
-      "create table " + tableName + " (col2 int4, col3 float8) USING csv " +
-        "WITH ('csvfile.delimiter'='|','compression.codec'='org.apache.hadoop.io.compress.DeflateCodec') " +
-        "PARTITION BY column(col1 int4)");
+        "create table " + tableName + " (col2 int4, col3 float8) USING csv " +
+            "WITH ('csvfile.delimiter'='|','compression.codec'='org.apache.hadoop.io.compress.DeflateCodec') " +
+            "PARTITION BY column(col1 int4)");
     res.close();
     assertTrue(catalog.existsTable(DEFAULT_DATABASE_NAME, tableName));
 
     res = executeString(
-      "insert overwrite into " + tableName + " select l_partkey, l_quantity, l_orderkey from lineitem");
+        "insert overwrite into " + tableName + " select l_partkey, l_quantity, l_orderkey from lineitem");
     res.close();
     TableDesc desc = catalog.getTableDesc(DEFAULT_DATABASE_NAME, tableName);
     if (!testingCluster.isHCatalogStoreRunning()) {
@@ -577,15 +575,15 @@ public class TestTablePartitions extends QueryTestCaseBase {
   public final void testColumnPartitionedTableByTwoColumnsWithCompression() throws Exception {
     String tableName = CatalogUtil.normalizeIdentifier("testColumnPartitionedTableByTwoColumnsWithCompression");
     ResultSet res = executeString("create table " + tableName + " (col3 float8, col4 text) USING csv " +
-      "WITH ('csvfile.delimiter'='|','compression.codec'='org.apache.hadoop.io.compress.DeflateCodec') " +
-      "PARTITION by column(col1 int4, col2 int4)");
+        "WITH ('csvfile.delimiter'='|','compression.codec'='org.apache.hadoop.io.compress.DeflateCodec') " +
+        "PARTITION by column(col1 int4, col2 int4)");
     res.close();
 
     assertTrue(catalog.existsTable(DEFAULT_DATABASE_NAME, tableName));
 
     res = executeString(
-      "insert overwrite into " + tableName +
-        " select  l_quantity, l_returnflag, l_orderkey, l_partkey from lineitem");
+        "insert overwrite into " + tableName +
+            " select  l_quantity, l_returnflag, l_orderkey, l_partkey from lineitem");
     res.close();
     TableDesc desc = catalog.getTableDesc(DEFAULT_DATABASE_NAME, tableName);
     if (!testingCluster.isHCatalogStoreRunning()) {
@@ -621,16 +619,16 @@ public class TestTablePartitions extends QueryTestCaseBase {
   public final void testColumnPartitionedTableByThreeColumnsWithCompression() throws Exception {
     String tableName = CatalogUtil.normalizeIdentifier("testColumnPartitionedTableByThreeColumnsWithCompression");
     ResultSet res = executeString(
-      "create table " + tableName + " (col4 text) USING csv " +
-        "WITH ('csvfile.delimiter'='|','compression.codec'='org.apache.hadoop.io.compress.DeflateCodec') " +
-        "partition by column(col1 int4, col2 int4, col3 float8)");
+        "create table " + tableName + " (col4 text) USING csv " +
+            "WITH ('csvfile.delimiter'='|','compression.codec'='org.apache.hadoop.io.compress.DeflateCodec') " +
+            "partition by column(col1 int4, col2 int4, col3 float8)");
     res.close();
 
     assertTrue(catalog.existsTable(DEFAULT_DATABASE_NAME, tableName));
 
     res = executeString(
-      "insert overwrite into " + tableName +
-        " select l_returnflag, l_orderkey, l_partkey, l_quantity from lineitem");
+        "insert overwrite into " + tableName +
+            " select l_returnflag, l_orderkey, l_partkey, l_quantity from lineitem");
     res.close();
     TableDesc desc = catalog.getTableDesc(DEFAULT_DATABASE_NAME, tableName);
     if (!testingCluster.isHCatalogStoreRunning()) {
@@ -704,16 +702,16 @@ public class TestTablePartitions extends QueryTestCaseBase {
   public final void testColumnPartitionedTableNoMatchedPartition() throws Exception {
     String tableName = CatalogUtil.normalizeIdentifier("testColumnPartitionedTableNoMatchedPartition");
     ResultSet res = executeString(
-      "create table " + tableName + " (col4 text) USING csv " +
-        "WITH ('csvfile.delimiter'='|','compression.codec'='org.apache.hadoop.io.compress.DeflateCodec') " +
-        "partition by column(col1 int4, col2 int4, col3 float8)");
+        "create table " + tableName + " (col4 text) USING csv " +
+            "WITH ('csvfile.delimiter'='|','compression.codec'='org.apache.hadoop.io.compress.DeflateCodec') " +
+            "partition by column(col1 int4, col2 int4, col3 float8)");
     res.close();
 
     assertTrue(catalog.existsTable(DEFAULT_DATABASE_NAME, tableName));
 
     res = executeString(
-      "insert overwrite into " + tableName +
-        " select l_returnflag , l_orderkey, l_partkey, l_quantity from lineitem");
+        "insert overwrite into " + tableName +
+            " select l_returnflag , l_orderkey, l_partkey, l_quantity from lineitem");
     res.close();
     TableDesc desc = catalog.getTableDesc(DEFAULT_DATABASE_NAME, tableName);
     if (!testingCluster.isHCatalogStoreRunning()) {
@@ -760,13 +758,13 @@ public class TestTablePartitions extends QueryTestCaseBase {
   public final void testColumnPartitionedTableWithSmallerExpressions1() throws Exception {
     String tableName = CatalogUtil.normalizeIdentifier("testColumnPartitionedTableWithSmallerExpressions1");
     ResultSet res = executeString(
-      "create table " + tableName + " (col1 int4, col2 int4, null_col int4) partition by column(key float8) ");
+        "create table " + tableName + " (col1 int4, col2 int4, null_col int4) partition by column(key float8) ");
     res.close();
 
     assertTrue(catalog.existsTable(DEFAULT_DATABASE_NAME, tableName));
 
     ClientProtos.SubmitQueryResponse response = client.executeQuery("insert overwrite into " + tableName
-      + " select l_orderkey, l_partkey from lineitem");
+        + " select l_orderkey, l_partkey from lineitem");
 
     assertTrue(response.hasErrorMessage());
     assertEquals(response.getErrorMessage(), "INSERT has smaller expressions than target columns\n");
@@ -780,13 +778,13 @@ public class TestTablePartitions extends QueryTestCaseBase {
   public final void testColumnPartitionedTableWithSmallerExpressions2() throws Exception {
     String tableName = CatalogUtil.normalizeIdentifier("testColumnPartitionedTableWithSmallerExpressions2");
     ResultSet res = executeString(
-      "create table " + tableName + " (col1 int4, col2 int4, null_col int4) partition by column(key float8) ");
+        "create table " + tableName + " (col1 int4, col2 int4, null_col int4) partition by column(key float8) ");
     res.close();
 
     assertTrue(catalog.existsTable(DEFAULT_DATABASE_NAME, tableName));
 
     ClientProtos.SubmitQueryResponse response = client.executeQuery("insert overwrite into " + tableName
-      + " select l_returnflag , l_orderkey, l_partkey from lineitem");
+        + " select l_returnflag , l_orderkey, l_partkey from lineitem");
 
     assertTrue(response.hasErrorMessage());
     assertEquals(response.getErrorMessage(), "INSERT has smaller expressions than target columns\n");
@@ -805,11 +803,11 @@ public class TestTablePartitions extends QueryTestCaseBase {
     res.close();
 
     res = executeString("create table testinsertquery1.table1 " +
-      "(col1 int4, col2 int4, col3 float8)");
+        "(col1 int4, col2 int4, col3 float8)");
     res.close();
 
     res = executeString("create table testinsertquery2.table1 " +
-      "(col1 int4, col2 int4, col3 float8)");
+        "(col1 int4, col2 int4, col3 float8)");
     res.close();
 
     CatalogService catalog = testingCluster.getMaster().getCatalog();
@@ -817,7 +815,7 @@ public class TestTablePartitions extends QueryTestCaseBase {
     assertTrue(catalog.existsTable("testinsertquery2", "table1"));
 
     res = executeString("insert overwrite into testinsertquery1.table1 " +
-      "select l_orderkey, l_partkey, l_quantity from default.lineitem;");
+        "select l_orderkey, l_partkey, l_quantity from default.lineitem;");
     res.close();
 
     TableDesc desc = catalog.getTableDesc("testinsertquery1", "table1");
@@ -826,7 +824,7 @@ public class TestTablePartitions extends QueryTestCaseBase {
     }
 
     res = executeString("insert overwrite into testinsertquery2.table1 " +
-      "select col1, col2, col3 from testinsertquery1.table1;");
+        "select col1, col2, col3 from testinsertquery1.table1;");
     res.close();
 
     desc = catalog.getTableDesc("testinsertquery2", "table1");
@@ -835,6 +833,39 @@ public class TestTablePartitions extends QueryTestCaseBase {
     }
   }
 
+  @Test
+  public final void testColumnPartitionedTableWithSmallerExpressions5() throws Exception {
+    String tableName = CatalogUtil.normalizeIdentifier("testColumnPartitionedTableWithSmallerExpressions5");
+    ResultSet res = executeString(
+        "create table " + tableName + " (col1 text) partition by column(col2 text) ");
+    res.close();
+
+    assertTrue(catalog.existsTable(DEFAULT_DATABASE_NAME, tableName));
+
+    res = executeString("insert overwrite into " + tableName + "(col1) select l_returnflag from lineitem");
+    res.close();
+    res = executeString("select * from " + tableName);
+    assertResultSet(res);
+    res.close();
+  }
+
+  @Test
+  public final void testColumnPartitionedTableWithSmallerExpressions6() throws Exception {
+    String tableName = CatalogUtil.normalizeIdentifier("testColumnPartitionedTableWithSmallerExpressions6");
+    ResultSet res = executeString(
+        "create table " + tableName + " (col1 text) partition by column(col2 text) ");
+    res.close();
+
+    assertTrue(catalog.existsTable(DEFAULT_DATABASE_NAME, tableName));
+
+    res = executeString(
+        "insert overwrite into " + tableName + "(col1) select l_returnflag from lineitem where l_orderkey = 1");
+    res.close();
+    res = executeString("select * from " + tableName);
+    assertResultSet(res);
+    res.close();
+  }
+
   private MasterPlan getQueryPlan(ResultSet res) {
     QueryId queryId = ((TajoResultSet)res).getQueryId();
     for (TajoWorker eachWorker: testingCluster.getTajoWorkers()) {
@@ -865,8 +896,8 @@ public class TestTablePartitions extends QueryTestCaseBase {
       int totalBytes = 0;
       Random rand = new Random(System.currentTimeMillis());
       String col2Data = "Column-2Column-2Column-2Column-2Column-2Column-2Column-2Column-2Column-2Column-2Column-2" +
-        "Column-2Column-2Column-2Column-2Column-2Column-2Column-2Column-2Column-2Column-2Column-2" +
-        "Column-2Column-2Column-2Column-2Column-2Column-2Column-2Column-2Column-2Column-2Column-2";
+          "Column-2Column-2Column-2Column-2Column-2Column-2Column-2Column-2Column-2Column-2Column-2" +
+          "Column-2Column-2Column-2Column-2Column-2Column-2Column-2Column-2Column-2Column-2Column-2";
 
       int index = 0;
       while(true) {
@@ -901,9 +932,9 @@ public class TestTablePartitions extends QueryTestCaseBase {
 
     } finally {
       testingCluster.setAllTajoDaemonConfValue(TajoConf.ConfVars.$DIST_QUERY_TABLE_PARTITION_VOLUME.varname,
-        TajoConf.ConfVars.$DIST_QUERY_TABLE_PARTITION_VOLUME.defaultVal);
+          TajoConf.ConfVars.$DIST_QUERY_TABLE_PARTITION_VOLUME.defaultVal);
       testingCluster.setAllTajoDaemonConfValue(TajoConf.ConfVars.SHUFFLE_HASH_APPENDER_PAGE_VOLUME.varname,
-        TajoConf.ConfVars.SHUFFLE_HASH_APPENDER_PAGE_VOLUME.defaultVal);
+          TajoConf.ConfVars.SHUFFLE_HASH_APPENDER_PAGE_VOLUME.defaultVal);
       executeString("DROP TABLE test_partition PURGE").close();
       executeString("DROP TABLE testScatteredHashShuffle PURGE").close();
     }
@@ -916,17 +947,17 @@ public class TestTablePartitions extends QueryTestCaseBase {
     executeDDL("lineitemspecial_ddl.sql", "lineitemspecial.tbl");
 
     executeString("CREATE TABLE IF NOT EXISTS pTable947 (id int, name text) PARTITION BY COLUMN (type text)")
-      .close();
+        .close();
     executeString("INSERT OVERWRITE INTO pTable947 SELECT l_orderkey, l_shipinstruct, l_shipmode FROM lineitemspecial")
-      .close();
+        .close();
     ResultSet res = executeString("select * from pTable947 where type='RA:*?><I/L#%S' or type='AIR'");
 
     String resStr = resultSetToString(res);
     String expected =
-      "id,name,type\n" +
-        "-------------------------------\n"
-        + "3,NONE,AIR\n"
-        + "3,TEST SPECIAL CHARS,RA:*?><I/L#%S\n";
+        "id,name,type\n" +
+            "-------------------------------\n"
+            + "3,NONE,AIR\n"
+            + "3,TEST SPECIAL CHARS,RA:*?><I/L#%S\n";
 
     assertEquals(expected, resStr);
     cleanupQuery(res);
@@ -939,9 +970,9 @@ public class TestTablePartitions extends QueryTestCaseBase {
     executeDDL("lineitemspecial_ddl.sql", "lineitemspecial.tbl");
 
     executeString("CREATE TABLE IF NOT EXISTS pTable948 (id int, name text) PARTITION BY COLUMN (type text)")
-      .close();
+        .close();
     executeString("INSERT OVERWRITE INTO pTable948 SELECT l_orderkey, l_shipinstruct, l_shipmode FROM lineitemspecial")
-      .close();
+        .close();
 
     ResultSet res = executeString("select * from pTable948 where type='RA:*?><I/L#%S'");
     assertResultSet(res);
@@ -951,4 +982,25 @@ public class TestTablePartitions extends QueryTestCaseBase {
     assertResultSet(res);
     cleanupQuery(res);
   }
+
+  @Test
+  public final void testIgnoreFilesInIntermediateDir() throws Exception {
+    // See - TAJO-1219: Files located in intermediate directories of partitioned table should be ignored
+    // It verifies that Tajo ignores files located in intermediate directories of partitioned table.
+
+    Path testDir = CommonTestingUtil.getTestDir();
+
+    executeString(
+      "CREATE EXTERNAL TABLE testIgnoreFilesInIntermediateDir (col1 int) USING CSV PARTITION BY COLUMN (col2 text) " +
+        "LOCATION '" + testDir + "'");
+
+    FileSystem fs = testDir.getFileSystem(conf);
+    FSDataOutputStream fos = fs.create(new Path(testDir, "table1.data"));
+    fos.write("a|b|c".getBytes());
+    fos.close();
+
+    ResultSet res = executeString("select * from testIgnoreFilesInIntermediateDir;");
+    assertFalse(res.next());
+    res.close();
+  }
 }


[4/4] tajo git commit: Merge branch 'master' of http://git-wip-us.apache.org/repos/asf/tajo into index_support

Merge branch 'master' of http://git-wip-us.apache.org/repos/asf/tajo into index_support


Project: http://git-wip-us.apache.org/repos/asf/tajo/repo
Commit: http://git-wip-us.apache.org/repos/asf/tajo/commit/72600c13
Tree: http://git-wip-us.apache.org/repos/asf/tajo/tree/72600c13
Diff: http://git-wip-us.apache.org/repos/asf/tajo/diff/72600c13

Branch: refs/heads/index_support
Commit: 72600c1389dd9ffe184b1004e9e66f7404aefd08
Parents: 5c11283 2a69bcc
Author: Jihoon Son <ji...@apache.org>
Authored: Fri Dec 5 11:22:01 2014 +0900
Committer: Jihoon Son <ji...@apache.org>
Committed: Fri Dec 5 11:22:01 2014 +0900

----------------------------------------------------------------------
 CHANGES                                         |  5 +++++
 .../java/org/apache/tajo/datum/DateDatum.java   | 11 +---------
 .../org/apache/tajo/datum/IntervalDatum.java    |  1 -
 .../java/org/apache/tajo/datum/TimeDatum.java   |  4 ----
 .../apache/tajo/datum/TestIntervalDatum.java    |  8 ++++----
 .../apache/tajo/master/querymaster/Query.java   |  3 ++-
 .../tajo/engine/query/TestTablePartitions.java  | 21 +++++++++++++++-----
 7 files changed, 28 insertions(+), 25 deletions(-)
----------------------------------------------------------------------