You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@tajo.apache.org by bl...@apache.org on 2015/09/17 11:57:16 UTC
[1/2] tajo git commit: Revert "TAJO-1493: Make partition pruning
based on catalog informations."
Repository: tajo
Updated Branches:
refs/heads/master b0c0a390e -> e1c2d352e
http://git-wip-us.apache.org/repos/asf/tajo/blob/e1c2d352/tajo-core-tests/src/test/java/org/apache/tajo/engine/query/TestTablePartitions.java
----------------------------------------------------------------------
diff --git a/tajo-core-tests/src/test/java/org/apache/tajo/engine/query/TestTablePartitions.java b/tajo-core-tests/src/test/java/org/apache/tajo/engine/query/TestTablePartitions.java
index a41812a..94e5e71 100644
--- a/tajo-core-tests/src/test/java/org/apache/tajo/engine/query/TestTablePartitions.java
+++ b/tajo-core-tests/src/test/java/org/apache/tajo/engine/query/TestTablePartitions.java
@@ -433,15 +433,8 @@ public class TestTablePartitions extends QueryTestCaseBase {
assertEquals(resultRows2.get(res.getDouble(4))[1], res.getInt(3));
}
- res = executeString("select * from " + tableName + " WHERE (col1 ='1' or col1 = '100') and col3 > 20");
- String result = resultSetToString(res);
- String expectedResult = "col4,col1,col2,col3\n" +
- "-------------------------------\n" +
- "N,1,1,36.0\n";
- res.close();
- assertEquals(expectedResult, result);
-
res = executeString("SELECT col1, col2, col3 FROM " + tableName);
+ String result = resultSetToString(res);
res.close();
verifyPartitionDirectoryFromCatalog(DEFAULT_DATABASE_NAME, tableName, new String[]{"col1", "col2", "col3"},
@@ -596,7 +589,7 @@ public class TestTablePartitions extends QueryTestCaseBase {
}
private final void verifyKeptExistingData(ResultSet res, String tableName) throws Exception {
- res = executeString("select * from " + tableName + " where col2 = 1 order by col4, col1, col2, col3");
+ res = executeString("select * from " + tableName + " where col2 = 1");
String resultSetData = resultSetToString(res);
res.close();
String expected = "col4,col1,col2,col3\n" +
@@ -1260,7 +1253,7 @@ public class TestTablePartitions extends QueryTestCaseBase {
}
query.append(" ").append(partitionColumn);
}
- query.append(" FROM ").append(databaseName).append(".").append(tableName);
+ query.append(" FROM ").append(tableName);
ResultSet res = executeString(query.toString());
StringBuilder partitionName = new StringBuilder();
@@ -1320,7 +1313,7 @@ public class TestTablePartitions extends QueryTestCaseBase {
// If duplicated partitions had been removed, partitions just will contain 'KEY=N' partition and 'KEY=R'
// partition. In previous Query and Stage, duplicated partitions were not deleted because they had been in List.
// If you want to verify duplicated partitions, you need to use List instead of Set with DerbyStore.
- List<PartitionDescProto> partitions = catalog.getAllPartitions(DEFAULT_DATABASE_NAME, tableName);
+ List<PartitionDescProto> partitions = catalog.getPartitions(DEFAULT_DATABASE_NAME, tableName);
assertEquals(2, partitions.size());
PartitionDescProto firstPartition = catalog.getPartition(DEFAULT_DATABASE_NAME, tableName, "key=N");
@@ -1332,421 +1325,4 @@ public class TestTablePartitions extends QueryTestCaseBase {
executeString("DROP TABLE " + tableName + " PURGE");
}
}
-
- @Test
- public final void testPatternMatchingPredicatesAndStringFunctions() throws Exception {
- ResultSet res = null;
- String tableName = CatalogUtil.normalizeIdentifier("testPatternMatchingPredicatesAndStringFunctions");
- String expectedResult;
-
- if (nodeType == NodeType.INSERT) {
- executeString("create table " + tableName
- + " (col1 int4, col2 int4) partition by column(l_shipdate text, l_returnflag text) ").close();
-
- assertTrue(catalog.existsTable(DEFAULT_DATABASE_NAME, tableName));
- assertEquals(2, catalog.getTableDesc(DEFAULT_DATABASE_NAME, tableName).getSchema().size());
- assertEquals(4, catalog.getTableDesc(DEFAULT_DATABASE_NAME, tableName).getLogicalSchema().size());
-
- executeString(
- "insert overwrite into " + tableName + " select l_orderkey, l_partkey, l_shipdate, l_returnflag from lineitem");
- } else {
- executeString(
- "create table " + tableName + "(col1 int4, col2 int4) partition by column(l_shipdate text, l_returnflag text) "
- + " as select l_orderkey, l_partkey, l_shipdate, l_returnflag from lineitem");
- }
-
- assertTrue(client.existTable(tableName));
-
- // Like
- res = executeString("SELECT * FROM " + tableName
- + " WHERE l_shipdate LIKE '1996%' and l_returnflag = 'N' order by l_shipdate");
-
- expectedResult = "col1,col2,l_shipdate,l_returnflag\n" +
- "-------------------------------\n" +
- "1,1,1996-03-13,N\n" +
- "1,1,1996-04-12,N\n";
-
- assertEquals(expectedResult, resultSetToString(res));
- res.close();
-
- // Not like
- res = executeString("SELECT * FROM " + tableName
- + " WHERE l_shipdate NOT LIKE '1996%' and l_returnflag IN ('R') order by l_shipdate");
-
- expectedResult = "col1,col2,l_shipdate,l_returnflag\n" +
- "-------------------------------\n" +
- "3,3,1993-11-09,R\n" +
- "3,2,1994-02-02,R\n";
-
- assertEquals(expectedResult, resultSetToString(res));
- res.close();
-
- // In
- res = executeString("SELECT * FROM " + tableName
- + " WHERE l_shipdate IN ('1993-11-09', '1994-02-02', '1997-01-28') AND l_returnflag = 'R' order by l_shipdate");
-
- expectedResult = "col1,col2,l_shipdate,l_returnflag\n" +
- "-------------------------------\n" +
- "3,3,1993-11-09,R\n" +
- "3,2,1994-02-02,R\n";
-
- assertEquals(expectedResult, resultSetToString(res));
- res.close();
-
- // Similar to
- res = executeString("SELECT * FROM " + tableName + " WHERE l_shipdate similar to '1993%' order by l_shipdate");
-
- expectedResult = "col1,col2,l_shipdate,l_returnflag\n" +
- "-------------------------------\n" +
- "3,3,1993-11-09,R\n";
-
- assertEquals(expectedResult, resultSetToString(res));
- res.close();
-
- // Regular expression
- res = executeString("SELECT * FROM " + tableName
- + " WHERE l_shipdate regexp '[1-2][0-9][0-9][3-9]-[0-1][0-9]-[0-3][0-9]' "
- + " AND l_returnflag <> 'N' ORDER BY l_shipdate");
-
- expectedResult = "col1,col2,l_shipdate,l_returnflag\n" +
- "-------------------------------\n" +
- "3,3,1993-11-09,R\n" +
- "3,2,1994-02-02,R\n";
-
- assertEquals(expectedResult, resultSetToString(res));
- res.close();
-
- // Concatenate
- res = executeString("SELECT * FROM " + tableName
- + " WHERE l_shipdate = ( '1996' || '-' || '03' || '-' || '13' ) order by l_shipdate");
-
- expectedResult = "col1,col2,l_shipdate,l_returnflag\n" +
- "-------------------------------\n" +
- "1,1,1996-03-13,N\n";
-
- assertEquals(expectedResult, resultSetToString(res));
- res.close();
-
- executeString("DROP TABLE " + tableName + " PURGE").close();
- res.close();
- }
-
- @Test
- public final void testDatePartitionColumn() throws Exception {
- ResultSet res = null;
- String tableName = CatalogUtil.normalizeIdentifier("testDatePartitionColumn");
- String expectedResult;
-
- if (nodeType == NodeType.INSERT) {
- executeString("create table " + tableName + " (col1 int4, col2 int4) partition by column(key date) ").close();
-
- assertTrue(catalog.existsTable(DEFAULT_DATABASE_NAME, tableName));
- assertEquals(2, catalog.getTableDesc(DEFAULT_DATABASE_NAME, tableName).getSchema().size());
- assertEquals(3, catalog.getTableDesc(DEFAULT_DATABASE_NAME, tableName).getLogicalSchema().size());
-
- executeString(
- "insert overwrite into " + tableName + " select l_orderkey, l_partkey, l_shipdate from lineitem");
- } else {
- executeString(
- "create table " + tableName + "(col1 int4, col2 int4) partition by column(key date) "
- + " as select l_orderkey, l_partkey, l_shipdate::date from lineitem");
- }
-
- assertTrue(client.existTable(tableName));
-
- // LessThanOrEquals
- res = executeString("SELECT * FROM " + tableName + " WHERE key <= date '1995-09-01' order by col1, col2, key");
-
- expectedResult = "col1,col2,key\n" +
- "-------------------------------\n" +
- "3,2,1994-02-02\n" +
- "3,3,1993-11-09\n";
-
- assertEquals(expectedResult, resultSetToString(res));
- res.close();
-
- // LessThan and GreaterThan
- res = executeString("SELECT * FROM " + tableName
- + " WHERE key > to_date('1993-01-01', 'YYYY-MM-DD') " +
- " and key < to_date('1996-01-01', 'YYYY-MM-DD') order by col1, col2, key desc");
-
- expectedResult = "col1,col2,key\n" +
- "-------------------------------\n" +
- "3,2,1994-02-02\n" +
- "3,3,1993-11-09\n";
-
- assertEquals(expectedResult, resultSetToString(res));
- res.close();
-
- // Between
- res = executeString("SELECT * FROM " + tableName
- + " WHERE key between date '1993-01-01' and date '1997-01-01' order by col1, col2, key desc");
-
- expectedResult = "col1,col2,key\n" +
- "-------------------------------\n" +
- "1,1,1996-04-12\n" +
- "1,1,1996-03-13\n" +
- "3,2,1994-02-02\n" +
- "3,3,1993-11-09\n";
-
- assertEquals(expectedResult, resultSetToString(res));
- res.close();
-
- // Cast
- res = executeString("SELECT * FROM " + tableName
- + " WHERE key > '1993-01-01'::date " +
- " and key < '1997-01-01'::timestamp order by col1, col2, key ");
-
- expectedResult = "col1,col2,key\n" +
- "-------------------------------\n" +
- "1,1,1996-03-13\n" +
- "1,1,1996-04-12\n" +
- "3,2,1994-02-02\n" +
- "3,3,1993-11-09\n";
-
- assertEquals(expectedResult, resultSetToString(res));
- res.close();
-
- // Interval
- res = executeString("SELECT * FROM " + tableName
- + " WHERE key > '1993-01-01'::date " +
- " and key < date '1994-01-01' + interval '1 year' order by col1, col2, key ");
-
- expectedResult = "col1,col2,key\n" +
- "-------------------------------\n" +
- "3,2,1994-02-02\n" +
- "3,3,1993-11-09\n";
-
- assertEquals(expectedResult, resultSetToString(res));
- res.close();
-
- // DateTime Function #1
- res = executeString("SELECT * FROM " + tableName
- + " WHERE key > '1993-01-01'::date " +
- " and key < add_months(date '1994-01-01', 12) order by col1, col2, key ");
-
- assertEquals(expectedResult, resultSetToString(res));
- res.close();
-
- // DateTime Function #2
- res = executeString("SELECT * FROM " + tableName
- + " WHERE key > '1993-01-01'::date " +
- " and key < add_months('1994-01-01'::timestamp, 12) order by col1, col2, key ");
-
- assertEquals(expectedResult, resultSetToString(res));
- res.close();
-
- executeString("DROP TABLE " + tableName + " PURGE").close();
- res.close();
- }
-
- @Test
- public final void testTimestampPartitionColumn() throws Exception {
- ResultSet res = null;
- String tableName = CatalogUtil.normalizeIdentifier("testTimestampPartitionColumn");
- String expectedResult;
-
- if (nodeType == NodeType.INSERT) {
- executeString("create table " + tableName
- + " (col1 int4, col2 int4) partition by column(key timestamp) ").close();
-
- assertTrue(catalog.existsTable(DEFAULT_DATABASE_NAME, tableName));
- assertEquals(2, catalog.getTableDesc(DEFAULT_DATABASE_NAME, tableName).getSchema().size());
- assertEquals(3, catalog.getTableDesc(DEFAULT_DATABASE_NAME, tableName).getLogicalSchema().size());
-
- executeString(
- "insert overwrite into " + tableName
- + " select l_orderkey, l_partkey, to_timestamp(l_shipdate, 'YYYY-MM-DD') from lineitem");
- } else {
- executeString(
- "create table " + tableName + "(col1 int4, col2 int4) partition by column(key timestamp) "
- + " as select l_orderkey, l_partkey, to_timestamp(l_shipdate, 'YYYY-MM-DD') from lineitem");
- }
-
- assertTrue(client.existTable(tableName));
-
- // LessThanOrEquals
- res = executeString("SELECT * FROM " + tableName
- + " WHERE key <= to_timestamp('1995-09-01', 'YYYY-MM-DD') order by col1, col2, key");
-
- expectedResult = "col1,col2,key\n" +
- "-------------------------------\n" +
- "3,2,1994-02-02 00:00:00\n" +
- "3,3,1993-11-09 00:00:00\n";
-
- assertEquals(expectedResult, resultSetToString(res));
- res.close();
-
- // LessThan and GreaterThan
- res = executeString("SELECT * FROM " + tableName
- + " WHERE key > to_timestamp('1993-01-01', 'YYYY-MM-DD') and " +
- "key < to_timestamp('1996-01-01', 'YYYY-MM-DD') order by col1, col2, key desc");
-
- expectedResult = "col1,col2,key\n" +
- "-------------------------------\n" +
- "3,2,1994-02-02 00:00:00\n" +
- "3,3,1993-11-09 00:00:00\n";
-
- assertEquals(expectedResult, resultSetToString(res));
- res.close();
-
- // Between
- res = executeString("SELECT * FROM " + tableName
- + " WHERE key between to_timestamp('1993-01-01', 'YYYY-MM-DD') " +
- "and to_timestamp('1997-01-01', 'YYYY-MM-DD') order by col1, col2, key desc");
-
- expectedResult = "col1,col2,key\n" +
- "-------------------------------\n" +
- "1,1,1996-04-12 00:00:00\n" +
- "1,1,1996-03-13 00:00:00\n" +
- "3,2,1994-02-02 00:00:00\n" +
- "3,3,1993-11-09 00:00:00\n";
-
- assertEquals(expectedResult, resultSetToString(res));
- res.close();
-
- executeString("DROP TABLE " + tableName + " PURGE").close();
- res.close();
- }
-
- @Test
- public final void testTimePartitionColumn() throws Exception {
- ResultSet res = null;
- String tableName = CatalogUtil.normalizeIdentifier("testTimePartitionColumn");
- String expectedResult;
-
- if (nodeType == NodeType.INSERT) {
- executeString("create table " + tableName
- + " (col1 int4, col2 int4) partition by column(key time) ").close();
-
- assertTrue(catalog.existsTable(DEFAULT_DATABASE_NAME, tableName));
- assertEquals(2, catalog.getTableDesc(DEFAULT_DATABASE_NAME, tableName).getSchema().size());
- assertEquals(3, catalog.getTableDesc(DEFAULT_DATABASE_NAME, tableName).getLogicalSchema().size());
-
- executeString(
- "insert overwrite into " + tableName
- + " select l_orderkey, l_partkey " +
- " , CASE l_shipdate WHEN '1996-03-13' THEN cast ('11:20:40' as time) " +
- " WHEN '1997-01-28' THEN cast ('12:10:20' as time) " +
- " WHEN '1994-02-02' THEN cast ('12:10:30' as time) " +
- " ELSE cast ('00:00:00' as time) END " +
- " from lineitem");
- } else {
- executeString(
- "create table " + tableName + "(col1 int4, col2 int4) partition by column(key time) "
- + " as select l_orderkey, l_partkey " +
- " , CASE l_shipdate WHEN '1996-03-13' THEN cast ('11:20:40' as time) " +
- " WHEN '1997-01-28' THEN cast ('12:10:20' as time) " +
- " WHEN '1994-02-02' THEN cast ('12:10:30' as time) " +
- " ELSE cast ('00:00:00' as time) END " +
- " from lineitem");
- }
-
- assertTrue(client.existTable(tableName));
- // LessThanOrEquals
- res = executeString("SELECT * FROM " + tableName
- + " WHERE key <= cast('12:10:20' as time) order by col1, col2, key");
-
- expectedResult = "col1,col2,key\n" +
- "-------------------------------\n" +
- "1,1,00:00:00\n" +
- "1,1,11:20:40\n" +
- "2,2,12:10:20\n" +
- "3,3,00:00:00\n";
-
- assertEquals(expectedResult, resultSetToString(res));
- res.close();
-
- // LessThan and GreaterThan
- res = executeString("SELECT * FROM " + tableName
- + " WHERE key > cast('00:00:00' as time) and " +
- "key < cast('12:10:00' as time) order by col1, col2, key desc");
-
- expectedResult = "col1,col2,key\n" +
- "-------------------------------\n" +
- "1,1,11:20:40\n";
-
- assertEquals(expectedResult, resultSetToString(res));
- res.close();
-
- // Between
- res = executeString("SELECT * FROM " + tableName
- + " WHERE key between cast('11:00:00' as time) " +
- "and cast('13:00:00' as time) order by col1, col2, key desc");
-
- expectedResult = "col1,col2,key\n" +
- "-------------------------------\n" +
- "1,1,11:20:40\n" +
- "2,2,12:10:20\n" +
- "3,2,12:10:30\n";
-
- assertEquals(expectedResult, resultSetToString(res));
- res.close();
-
- executeString("DROP TABLE " + tableName + " PURGE").close();
- res.close();
- }
-
- @Test
- public final void testRemainPartitionPath() throws Exception {
- ResultSet res = null;
- executeString("create database test_partition").close();
-
- String databaseName = "test_partition";
- String tableName = CatalogUtil.normalizeIdentifier("part");
-
- ClientProtos.SubmitQueryResponse response;
- if (nodeType == NodeType.INSERT) {
- res = executeString(
- "create table " + databaseName + "." + tableName + " (col1 int4, col2 int4) partition by column(key float8) ");
- res.close();
-
- assertTrue(catalog.existsTable(databaseName, tableName));
- assertEquals(2, catalog.getTableDesc(databaseName, tableName).getSchema().size());
- assertEquals(3, catalog.getTableDesc(databaseName, tableName).getLogicalSchema().size());
-
- response = client.executeQuery(
- "insert overwrite into " + databaseName + "." + tableName + " select l_orderkey, l_partkey, " +
- "l_quantity from lineitem");
- } else {
- response = client.executeQuery(
- "create table "+ databaseName + "." + tableName + "(col1 int4, col2 int4) partition by column(key float8) "
- + " as select l_orderkey, l_partkey, l_quantity from lineitem");
- }
-
- QueryId queryId = new QueryId(response.getQueryId());
- testingCluster.waitForQuerySubmitted(queryId, 10);
- QueryMasterTask queryMasterTask = testingCluster.getQueryMasterTask(queryId);
- assertNotNull(queryMasterTask);
- TajoClientUtil.waitCompletion(client, queryId);
-
- MasterPlan plan = queryMasterTask.getQuery().getPlan();
-
- ExecutionBlock rootEB = plan.getRoot();
-
- assertEquals(1, plan.getChildCount(rootEB.getId()));
-
- ExecutionBlock insertEB = plan.getChild(rootEB.getId(), 0);
- assertNotNull(insertEB);
-
- assertEquals(nodeType, insertEB.getPlan().getType());
- assertEquals(1, plan.getChildCount(insertEB.getId()));
-
- ExecutionBlock scanEB = plan.getChild(insertEB.getId(), 0);
-
- List<DataChannel> list = plan.getOutgoingChannels(scanEB.getId());
- assertEquals(1, list.size());
- DataChannel channel = list.get(0);
- assertNotNull(channel);
- assertEquals(SCATTERED_HASH_SHUFFLE, channel.getShuffleType());
- assertEquals(1, channel.getShuffleKeys().length);
-
- TableDesc tableDesc = catalog.getTableDesc(databaseName, tableName);
- verifyPartitionDirectoryFromCatalog(databaseName, tableName, new String[]{"key"},
- tableDesc.getStats().getNumRows());
-
- executeString("DROP TABLE " + databaseName + "." + tableName + " PURGE").close();
- executeString("DROP database " + databaseName).close();
- }
-
}
http://git-wip-us.apache.org/repos/asf/tajo/blob/e1c2d352/tajo-core/src/main/java/org/apache/tajo/engine/planner/physical/ColPartitionStoreExec.java
----------------------------------------------------------------------
diff --git a/tajo-core/src/main/java/org/apache/tajo/engine/planner/physical/ColPartitionStoreExec.java b/tajo-core/src/main/java/org/apache/tajo/engine/planner/physical/ColPartitionStoreExec.java
index 832b8d3..a8a1c78 100644
--- a/tajo-core/src/main/java/org/apache/tajo/engine/planner/physical/ColPartitionStoreExec.java
+++ b/tajo-core/src/main/java/org/apache/tajo/engine/planner/physical/ColPartitionStoreExec.java
@@ -38,7 +38,6 @@ import org.apache.tajo.plan.logical.NodeType;
import org.apache.tajo.plan.logical.StoreTableNode;
import org.apache.tajo.storage.*;
import org.apache.tajo.unit.StorageUnit;
-import org.apache.tajo.util.StringUtils;
import org.apache.tajo.worker.TaskAttemptContext;
import java.io.IOException;
@@ -174,18 +173,17 @@ public abstract class ColPartitionStoreExec extends UnaryPhysicalExec {
PartitionKeyProto.Builder keyBuilder = PartitionKeyProto.newBuilder();
keyBuilder.setColumnName(split[0]);
- // Partition path have been escaped to avoid URISyntaxException. But partition value of partition keys table
- // need to contain unescaped value for comparing filter conditions in select statement.
- keyBuilder.setPartitionValue(StringUtils.unescapePathName(split[1]));
+ keyBuilder.setPartitionValue(split[1]);
builder.addPartitionKeys(keyBuilder.build());
}
if (this.plan.getUri() == null) {
- // In CTAS, the uri would be null. So, it get the uri from staging directory.
- int endIndex = storeTablePath.toString().indexOf(FileTablespace.TMP_STAGING_DIR_PREFIX);
+ // In CTAS, the uri would be null. So,
+ String[] split = CatalogUtil.splitTableName(plan.getTableName());
+ int endIndex = storeTablePath.toString().indexOf(split[1]) + split[1].length();
String outputPath = storeTablePath.toString().substring(0, endIndex);
- builder.setPath(outputPath + partition);
+ builder.setPath(outputPath + "/" + partition);
} else {
builder.setPath(this.plan.getUri().toString() + "/" + partition);
}
http://git-wip-us.apache.org/repos/asf/tajo/blob/e1c2d352/tajo-core/src/main/java/org/apache/tajo/master/TajoMasterClientService.java
----------------------------------------------------------------------
diff --git a/tajo-core/src/main/java/org/apache/tajo/master/TajoMasterClientService.java b/tajo-core/src/main/java/org/apache/tajo/master/TajoMasterClientService.java
index 6160cdb..cd43add 100644
--- a/tajo-core/src/main/java/org/apache/tajo/master/TajoMasterClientService.java
+++ b/tajo-core/src/main/java/org/apache/tajo/master/TajoMasterClientService.java
@@ -970,7 +970,7 @@ public class TajoMasterClientService extends AbstractService {
tableName = request.getValue();
}
- List<PartitionDescProto> partitions = catalog.getAllPartitions(databaseName, tableName);
+ List<PartitionDescProto> partitions = catalog.getPartitions(databaseName, tableName);
return PartitionListResponse.newBuilder()
.setState(OK)
.addAllPartition(partitions)
http://git-wip-us.apache.org/repos/asf/tajo/blob/e1c2d352/tajo-plan/src/main/java/org/apache/tajo/plan/expr/AlgebraicUtil.java
----------------------------------------------------------------------
diff --git a/tajo-plan/src/main/java/org/apache/tajo/plan/expr/AlgebraicUtil.java b/tajo-plan/src/main/java/org/apache/tajo/plan/expr/AlgebraicUtil.java
index e61d361..d06c1d3 100644
--- a/tajo-plan/src/main/java/org/apache/tajo/plan/expr/AlgebraicUtil.java
+++ b/tajo-plan/src/main/java/org/apache/tajo/plan/expr/AlgebraicUtil.java
@@ -18,19 +18,15 @@
package org.apache.tajo.plan.expr;
-import com.google.common.base.Preconditions;
-import com.google.common.collect.Sets;
import org.apache.tajo.algebra.*;
import org.apache.tajo.catalog.Column;
-import org.apache.tajo.catalog.proto.CatalogProtos;
import org.apache.tajo.exception.TajoException;
import org.apache.tajo.plan.visitor.SimpleAlgebraVisitor;
-import org.apache.tajo.util.TUtil;
import java.util.*;
public class AlgebraicUtil {
-
+
/**
* Transpose a given comparison expression into the expression
* where the variable corresponding to the target is placed
@@ -497,190 +493,4 @@ public class AlgebraicUtil {
return super.visitTimeLiteral(ctx, stack, expr);
}
}
-
- /**
- * Find the top expr matched to type from the given expr
- *
- * @param expr start expr
- * @param type to find
- * @return a found expr
- */
- public static <T extends Expr> T findTopExpr(Expr expr, OpType type) throws TajoException {
- Preconditions.checkNotNull(expr);
- Preconditions.checkNotNull(type);
-
- ExprFinder finder = new ExprFinder(type);
- finder.visit(null, new Stack<Expr>(), expr);
-
- if (finder.getFoundExprs().size() == 0) {
- return null;
- }
- return (T) finder.getFoundExprs().get(0);
- }
-
- private static class ExprFinder extends SimpleAlgebraVisitor<Object, Expr> {
- private List<Expr> list = new ArrayList<Expr>();
- private final OpType[] tofind;
- private boolean topmost = false;
- private boolean finished = false;
-
- public ExprFinder(OpType... type) {
-
- this.tofind = type;
- }
-
- public ExprFinder(OpType[] type, boolean topmost) {
- this(type);
- this.topmost = topmost;
- }
-
- @Override
- public Expr visit(Object ctx, Stack<Expr> stack, Expr expr) throws TajoException {
- if (!finished) {
- for (OpType type : tofind) {
- if (expr.getType() == type) {
- list.add(expr);
- }
- if (topmost && list.size() > 0) {
- finished = true;
- }
- }
- }
- return super.visit(ctx, stack, expr);
- }
-
- public List<Expr> getFoundExprs() {
- return list;
- }
-
- }
-
- public static Expr[] toConjunctiveNormalFormArray(Expr expr) {
- List<Expr> list = new ArrayList<Expr>();
- toConjunctiveNormalFormArrayRecursive(expr, list);
- return list.toArray(new Expr[list.size()]);
- }
-
- private static void toConjunctiveNormalFormArrayRecursive(Expr node, List<Expr> found) {
- if (node.getType() == OpType.And) {
- toConjunctiveNormalFormArrayRecursive(((BinaryOperator) node).getLeft(), found);
- toConjunctiveNormalFormArrayRecursive(((BinaryOperator) node).getRight(), found);
- } else {
- found.add(node);
- }
- }
-
- /**
- * It finds unique columns from a Expr.
- */
- public static LinkedHashSet<ColumnReferenceExpr> findUniqueColumnReferences(Expr expr) throws TajoException {
- UniqueColumnReferenceFinder finder = new UniqueColumnReferenceFinder();
- finder.visit(null, new Stack<Expr>(), expr);
- return finder.getColumnRefs();
- }
-
- private static class UniqueColumnReferenceFinder extends SimpleAlgebraVisitor<Object, Expr> {
- private LinkedHashSet<ColumnReferenceExpr> columnSet = Sets.newLinkedHashSet();
- private ColumnReferenceExpr field = null;
-
- @Override
- public Expr visit(Object ctx, Stack<Expr> stack, Expr expr) throws TajoException {
- if (expr.getType() == OpType.Column) {
- field = (ColumnReferenceExpr) expr;
- columnSet.add(field);
- }
- return super.visit(ctx, stack, expr);
- }
-
- public LinkedHashSet<ColumnReferenceExpr> getColumnRefs() {
- return this.columnSet;
- }
-
- }
-
- /**
- * Build Exprs for all columns with a list of filter conditions.
- *
- * For example, consider you have a partitioned table for three columns (i.e., col1, col2, col3).
- * Then, this methods will create three Exprs for (col1), (col2), (col3).
- *
- * Assume that an user gives a condition WHERE col1 ='A' and col3 = 'C'.
- * There is no filter condition corresponding to col2.
- * Then, the path filter conditions are corresponding to the followings:
- *
- * The first Expr: col1 = 'A'
- * The second Expr: col2 IS NOT NULL
- * The third Expr: col3 = 'C'
- *
- * 'IS NOT NULL' predicate is always true against the partition path.
- *
- *
- * @param partitionColumns
- * @param conjunctiveForms
- * @return
- */
- public static Expr[] getAccumulatedFiltersByExpr(String tableName,
- List<CatalogProtos.ColumnProto> partitionColumns, Expr[] conjunctiveForms) throws TajoException {
- Expr[] filters = new Expr[partitionColumns.size()];
- Column target;
-
- for (int i = 0; i < partitionColumns.size(); i++) {
- List<Expr> accumulatedFilters = TUtil.newList();
- target = new Column(partitionColumns.get(i));
- ColumnReferenceExpr columnReference = new ColumnReferenceExpr(tableName, target.getSimpleName());
-
- if (conjunctiveForms == null) {
- accumulatedFilters.add(new IsNullPredicate(true, columnReference));
- } else {
- for (Expr expr : conjunctiveForms) {
- if (AlgebraicUtil.findUniqueColumnReferences(expr).contains(columnReference)) {
- // Accumulate one qual per level
- accumulatedFilters.add(expr);
- }
- }
-
- if (accumulatedFilters.size() == 0) {
- accumulatedFilters.add(new IsNullPredicate(true, columnReference));
- }
- }
-
- Expr filterPerLevel = AlgebraicUtil.createSingletonExprFromCNFByExpr(
- accumulatedFilters.toArray(new Expr[accumulatedFilters.size()]));
- filters[i] = filterPerLevel;
- }
-
- return filters;
- }
-
- public static Expr createSingletonExprFromCNFByExpr(Collection<Expr> cnfExprs) {
- return createSingletonExprFromCNFByExpr(cnfExprs.toArray(new Expr[cnfExprs.size()]));
- }
-
- /**
- * Convert a list of conjunctive normal forms into a singleton expression.
- *
- * @param cnfExprs
- * @return The EvalNode object that merges all CNF-formed expressions.
- */
- public static Expr createSingletonExprFromCNFByExpr(Expr... cnfExprs) {
- if (cnfExprs.length == 1) {
- return cnfExprs[0];
- }
-
- return createSingletonExprFromCNFRecursiveByExpr(cnfExprs, 0);
- }
-
- private static Expr createSingletonExprFromCNFRecursiveByExpr(Expr[] exprs, int idx) {
- if (idx >= exprs.length) {
- throw new ArrayIndexOutOfBoundsException("index " + idx + " is exceeded the maximum length ("+
- exprs.length+") of EvalNode");
- }
-
- if (idx == exprs.length - 2) {
- return new BinaryOperator(OpType.And, exprs[idx], exprs[idx + 1]);
- } else {
- return new BinaryOperator(OpType.And, exprs[idx], createSingletonExprFromCNFRecursiveByExpr(exprs, idx + 1));
- }
- }
-
}
http://git-wip-us.apache.org/repos/asf/tajo/blob/e1c2d352/tajo-plan/src/main/java/org/apache/tajo/plan/rewrite/rules/PartitionedTableRewriter.java
----------------------------------------------------------------------
diff --git a/tajo-plan/src/main/java/org/apache/tajo/plan/rewrite/rules/PartitionedTableRewriter.java b/tajo-plan/src/main/java/org/apache/tajo/plan/rewrite/rules/PartitionedTableRewriter.java
index 47e2c6c..b5cd42b 100644
--- a/tajo-plan/src/main/java/org/apache/tajo/plan/rewrite/rules/PartitionedTableRewriter.java
+++ b/tajo-plan/src/main/java/org/apache/tajo/plan/rewrite/rules/PartitionedTableRewriter.java
@@ -24,19 +24,19 @@ import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.*;
import org.apache.tajo.OverridableConf;
-import org.apache.tajo.catalog.*;
+import org.apache.tajo.catalog.Column;
+import org.apache.tajo.catalog.Schema;
+import org.apache.tajo.catalog.TableDesc;
import org.apache.tajo.catalog.partition.PartitionMethodDesc;
-import org.apache.tajo.catalog.proto.CatalogProtos.PartitionsByAlgebraProto;
-import org.apache.tajo.catalog.proto.CatalogProtos.PartitionDescProto;
import org.apache.tajo.datum.DatumFactory;
import org.apache.tajo.datum.NullDatum;
-import org.apache.tajo.exception.*;
+import org.apache.tajo.exception.TajoException;
+import org.apache.tajo.exception.TajoInternalError;
import org.apache.tajo.plan.LogicalPlan;
import org.apache.tajo.plan.expr.*;
import org.apache.tajo.plan.logical.*;
import org.apache.tajo.plan.rewrite.LogicalPlanRewriteRule;
import org.apache.tajo.plan.rewrite.LogicalPlanRewriteRuleContext;
-import org.apache.tajo.plan.util.EvalNodeToExprConverter;
import org.apache.tajo.plan.util.PlannerUtil;
import org.apache.tajo.plan.visitor.BasicLogicalPlanVisitor;
import org.apache.tajo.storage.Tuple;
@@ -44,11 +44,11 @@ import org.apache.tajo.storage.VTuple;
import org.apache.tajo.util.StringUtils;
import java.io.IOException;
-import java.util.*;
+import java.util.List;
+import java.util.Set;
+import java.util.Stack;
public class PartitionedTableRewriter implements LogicalPlanRewriteRule {
- private CatalogService catalog;
-
private static final Log LOG = LogFactory.getLog(PartitionedTableRewriter.class);
private static final String NAME = "Partitioned Table Rewriter";
@@ -78,7 +78,6 @@ public class PartitionedTableRewriter implements LogicalPlanRewriteRule {
public LogicalPlan rewrite(LogicalPlanRewriteRuleContext context) throws TajoException {
LogicalPlan plan = context.getPlan();
LogicalPlan.QueryBlock rootBlock = plan.getRootBlock();
- this.catalog = context.getCatalog();
rewriter.visit(context.getQueryContext(), plan, rootBlock, rootBlock.getRoot(), new Stack<LogicalNode>());
return plan;
}
@@ -119,137 +118,32 @@ public class PartitionedTableRewriter implements LogicalPlanRewriteRule {
* @return
* @throws IOException
*/
- private Path [] findFilteredPaths(OverridableConf queryContext, String tableName,
- Schema partitionColumns, EvalNode [] conjunctiveForms, Path tablePath)
- throws IOException, UndefinedDatabaseException, UndefinedTableException,
- UndefinedPartitionMethodException, UndefinedOperatorException {
+ private Path [] findFilteredPaths(OverridableConf queryContext, Schema partitionColumns, EvalNode [] conjunctiveForms,
+ Path tablePath)
+ throws IOException {
- Path [] filteredPaths = null;
FileSystem fs = tablePath.getFileSystem(queryContext.getConf());
- String [] splits = CatalogUtil.splitFQTableName(tableName);
- List<PartitionDescProto> partitions = null;
- try {
- if (conjunctiveForms == null) {
- partitions = catalog.getAllPartitions(splits[0], splits[1]);
- } else {
- PartitionsByAlgebraProto request = getPartitionsAlgebraProto(splits[0], splits[1], conjunctiveForms);
- partitions = catalog.getPartitionsByAlgebra(request);
- }
- // If catalog returns list of table partitions successfully, build path lists for scanning table data.
- if (partitions != null) {
- filteredPaths = new Path[partitions.size()];
- for (int i = 0; i < partitions.size(); i++) {
- filteredPaths[i] = new Path(partitions.get(i).getPath());
- }
- }
- } catch (TajoInternalError e) {
- LOG.error(e.getMessage(), e);
+ PathFilter [] filters;
+ if (conjunctiveForms == null) {
+ filters = buildAllAcceptingPathFilters(partitionColumns);
+ } else {
+ filters = buildPathFiltersForAllLevels(partitionColumns, conjunctiveForms);
}
- // If we should fail to build path lists with catalog, we need to path lists using getting an array of FileStatus
- // objects with the path-filter.
- if (partitions == null || filteredPaths == null) {
- PathFilter [] filters;
- if (conjunctiveForms == null) {
- filters = buildAllAcceptingPathFilters(partitionColumns);
- } else {
- filters = buildPathFiltersForAllLevels(partitionColumns, conjunctiveForms);
- }
-
- // loop from one to the number of partition columns
- filteredPaths = toPathArray(fs.listStatus(tablePath, filters[0]));
+ // loop from one to the number of partition columns
+ Path [] filteredPaths = toPathArray(fs.listStatus(tablePath, filters[0]));
- for (int i = 1; i < partitionColumns.size(); i++) {
- // Get all file status matched to a ith level path filter.
- filteredPaths = toPathArray(fs.listStatus(filteredPaths, filters[i]));
- }
+ for (int i = 1; i < partitionColumns.size(); i++) {
+ // Get all file status matched to a ith level path filter.
+ filteredPaths = toPathArray(fs.listStatus(filteredPaths, filters[i]));
}
LOG.info("Filtered directory or files: " + filteredPaths.length);
-
return filteredPaths;
}
/**
- * This will build algebra expressions for querying partitions and partition keys in CatalogStore.
- *
- * For example, consider you have a partitioned table for three columns (i.e., col1, col2, col3).
- * Assume that an user gives a condition WHERE (col1 ='1' or col1 = '100') and col3 > 20 .
- *
- * Then, the algebra expression would be generated as following:
- *
- * {
- * "LeftExpr": {
- * "LeftExpr": {
- * "Qualifier": "default.table1",
- * "ColumnName": "col3",
- * "OpType": "Column"
- * },
- * "RightExpr": {
- * "Value": "20.0",
- * "ValueType": "Unsigned_Integer",
- * "OpType": "Literal"
- * },
- * "OpType": "GreaterThan"
- * },
- * "RightExpr": {
- * "LeftExpr": {
- * "LeftExpr": {
- * "Qualifier": "default.table1",
- * "ColumnName": "col1",
- * "OpType": "Column"
- * },
- * "RightExpr": {
- * "Value": "1",
- * "ValueType": "String",
- * "OpType": "Literal"
- * },
- * "OpType": "Equals"
- * },
- * "RightExpr": {
- * "LeftExpr": {
- * "Qualifier": "default.table1",
- * "ColumnName": "col1",
- * "OpType": "Column"
- * },
- * "RightExpr": {
- * "Value": "100",
- * "ValueType": "String",
- * "OpType": "Literal"
- * },
- * "OpType": "Equals"
- * },
- * "OpType": "Or"
- * },
- * "OpType": "And"
- *}
- *
- * @param databaseName
- * @param tableName
- * @param conjunctiveForms
- * @return
- */
- public static PartitionsByAlgebraProto getPartitionsAlgebraProto(
- String databaseName, String tableName, EvalNode [] conjunctiveForms) {
-
- PartitionsByAlgebraProto.Builder request = PartitionsByAlgebraProto.newBuilder();
- request.setDatabaseName(databaseName);
- request.setTableName(tableName);
-
- if (conjunctiveForms != null) {
- EvalNode evalNode = AlgebraicUtil.createSingletonExprFromCNF(conjunctiveForms);
- EvalNodeToExprConverter convertor = new EvalNodeToExprConverter(databaseName + "." + tableName);
- convertor.visit(null, evalNode, new Stack<EvalNode>());
- request.setAlgebra(convertor.getResult().toJson());
- } else {
- request.setAlgebra("");
- }
-
- return request.build();
- }
-
- /**
* Build path filters for all levels with a list of filter conditions.
*
* For example, consider you have a partitioned table for three columns (i.e., col1, col2, col3).
@@ -297,7 +191,6 @@ public class PartitionedTableRewriter implements LogicalPlanRewriteRule {
accumulatedFilters.toArray(new EvalNode[accumulatedFilters.size()]));
filters[i] = new PartitionPathFilter(partitionColumns, filterPerLevel);
}
-
return filters;
}
@@ -329,9 +222,7 @@ public class PartitionedTableRewriter implements LogicalPlanRewriteRule {
return paths;
}
- public Path [] findFilteredPartitionPaths(OverridableConf queryContext, ScanNode scanNode) throws IOException,
- UndefinedDatabaseException, UndefinedTableException, UndefinedPartitionMethodException,
- UndefinedOperatorException {
+ private Path [] findFilteredPartitionPaths(OverridableConf queryContext, ScanNode scanNode) throws IOException {
TableDesc table = scanNode.getTableDesc();
PartitionMethodDesc partitionDesc = scanNode.getTableDesc().getPartitionMethod();
@@ -370,10 +261,10 @@ public class PartitionedTableRewriter implements LogicalPlanRewriteRule {
}
if (indexablePredicateSet.size() > 0) { // There are at least one indexable predicates
- return findFilteredPaths(queryContext, table.getName(), paritionValuesSchema,
+ return findFilteredPaths(queryContext, paritionValuesSchema,
indexablePredicateSet.toArray(new EvalNode[indexablePredicateSet.size()]), new Path(table.getUri()));
} else { // otherwise, we will get all partition paths.
- return findFilteredPaths(queryContext, table.getName(), paritionValuesSchema, null, new Path(table.getUri()));
+ return findFilteredPaths(queryContext, paritionValuesSchema, null, new Path(table.getUri()));
}
}
http://git-wip-us.apache.org/repos/asf/tajo/blob/e1c2d352/tajo-plan/src/main/java/org/apache/tajo/plan/util/EvalNodeToExprConverter.java
----------------------------------------------------------------------
diff --git a/tajo-plan/src/main/java/org/apache/tajo/plan/util/EvalNodeToExprConverter.java b/tajo-plan/src/main/java/org/apache/tajo/plan/util/EvalNodeToExprConverter.java
deleted file mode 100644
index 7510bcd..0000000
--- a/tajo-plan/src/main/java/org/apache/tajo/plan/util/EvalNodeToExprConverter.java
+++ /dev/null
@@ -1,297 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.tajo.plan.util;
-
-import org.apache.tajo.algebra.*;
-import org.apache.tajo.datum.DateDatum;
-import org.apache.tajo.datum.Datum;
-import org.apache.tajo.datum.TimeDatum;
-import org.apache.tajo.datum.TimestampDatum;
-import org.apache.tajo.plan.expr.*;
-
-import java.util.Stack;
-
-/**
- * This converts EvalNode tree to Expr tree.
- *
- */
-public class EvalNodeToExprConverter extends SimpleEvalNodeVisitor<Object> {
- private Stack<Expr> exprs = new Stack<Expr>();
-
- private String tableName;
-
- public EvalNodeToExprConverter(String tableName) {
- this.tableName = tableName;
- }
-
- public Expr getResult() {
- return exprs.pop();
- }
-
- @Override
- protected EvalNode visitBinaryEval(Object o, Stack<EvalNode> stack, BinaryEval binaryEval) {
- stack.push(binaryEval);
- visit(o, binaryEval.getLeftExpr(), stack);
- Expr left = exprs.pop();
-
- visit(o, binaryEval.getRightExpr(), stack);
- Expr right = exprs.pop();
-
- Expr expr = null;
- switch (binaryEval.getType()) {
- // Arithmetic expression
- case PLUS:
- expr = new BinaryOperator(OpType.Plus, left, right);
- break;
- case MINUS:
- expr = new BinaryOperator(OpType.Minus, left, right);
- break;
- case MULTIPLY:
- expr = new BinaryOperator(OpType.Multiply, left, right);
- break;
- case DIVIDE:
- expr = new BinaryOperator(OpType.Divide, left, right);
- break;
- case MODULAR:
- expr = new BinaryOperator(OpType.Modular, left, right);
- break;
-
- // Logical Predicates
- case AND:
- expr = new BinaryOperator(OpType.And, left, right);
- break;
- case OR:
- expr = new BinaryOperator(OpType.Or, left, right);
- break;
- case NOT:
- expr = new BinaryOperator(OpType.Not, left, right);
- break;
-
- // Comparison Predicates
- case EQUAL:
- expr = new BinaryOperator(OpType.Equals, left, right);
- break;
- case NOT_EQUAL:
- expr = new BinaryOperator(OpType.NotEquals, left, right);
- break;
- case LTH:
- expr = new BinaryOperator(OpType.LessThan, left, right);
- break;
- case LEQ:
- expr = new BinaryOperator(OpType.LessThanOrEquals, left, right);
- break;
- case GTH:
- expr = new BinaryOperator(OpType.GreaterThan, left, right);
- break;
- case GEQ:
- expr = new BinaryOperator(OpType.GreaterThanOrEquals, left, right);
- break;
-
- // SQL standard predicates
- case IS_NULL:
- expr = new BinaryOperator(OpType.IsNullPredicate, left, right);
- break;
- case CASE:
- expr = new BinaryOperator(OpType.CaseWhen, left, right);
- break;
- case IN:
- InEval inEval = (InEval) binaryEval;
- expr = new InPredicate(left, right, inEval.isNot());
- break;
-
- // String operators and Pattern match predicates
- case LIKE:
- LikePredicateEval likePredicateEval = (LikePredicateEval) binaryEval;
- expr = new PatternMatchPredicate(OpType.LikePredicate, likePredicateEval.isNot(), left, right);
- break;
- case SIMILAR_TO:
- SimilarToPredicateEval similarToPredicateEval = (SimilarToPredicateEval) binaryEval;
- expr = new PatternMatchPredicate(OpType.SimilarToPredicate, similarToPredicateEval.isNot(), left, right);
- break;
- case REGEX:
- RegexPredicateEval regexPredicateEval = (RegexPredicateEval) binaryEval;
- expr = new PatternMatchPredicate(OpType.Regexp, regexPredicateEval.isNot(), left, right);
- break;
- case CONCATENATE:
- default:
- throw new RuntimeException("Unsupported type: " + binaryEval.getType().name());
- }
-
- if (expr != null) {
- exprs.push(expr);
- }
-
- stack.pop();
- return null;
- }
-
- @Override
- protected EvalNode visitConst(Object o, ConstEval evalNode, Stack<EvalNode> stack) {
- Expr value = null;
- DateValue dateValue;
- TimeValue timeValue;
-
- switch (evalNode.getValueType().getType()) {
- case NULL_TYPE:
- value = new NullLiteral();
- break;
- case BOOLEAN:
- value = new LiteralValue(evalNode.getValue().asChars(), LiteralValue.LiteralType.Boolean);
- break;
- case INT1:
- case INT2:
- case INT4:
- value = new LiteralValue(evalNode.getValue().asChars(), LiteralValue.LiteralType.Unsigned_Integer);
- break;
- case INT8:
- value = new LiteralValue(evalNode.getValue().asChars(), LiteralValue.LiteralType.Unsigned_Large_Integer);
- break;
- case FLOAT4:
- case FLOAT8:
- value = new LiteralValue(evalNode.getValue().asChars(), LiteralValue.LiteralType.Unsigned_Float);
- break;
- case TEXT:
- value = new LiteralValue(evalNode.getValue().asChars(), LiteralValue.LiteralType.String);
- break;
- case DATE:
- DateDatum dateDatum = (DateDatum) evalNode.getValue();
-
- dateValue = new DateValue(""+dateDatum.getYear(),
- ""+dateDatum.getMonthOfYear(), ""+dateDatum.getDayOfMonth());
- value = new DateLiteral(dateValue);
-
- break;
- case TIMESTAMP:
- TimestampDatum timestampDatum = (TimestampDatum) evalNode.getValue();
-
- dateValue = new DateValue(""+timestampDatum.getYear(),
- ""+timestampDatum.getMonthOfYear(), ""+timestampDatum.getDayOfMonth());
-
- timeValue = new TimeValue(""+timestampDatum.getHourOfDay()
- , ""+timestampDatum.getMinuteOfHour(), ""+timestampDatum.getSecondOfMinute());
-
- value = new TimestampLiteral(dateValue, timeValue);
- break;
- case TIME:
- TimeDatum timeDatum = (TimeDatum) evalNode.getValue();
- timeValue = new TimeValue(""+timeDatum.getHourOfDay()
- , ""+timeDatum.getMinuteOfHour(), ""+timeDatum.getSecondOfMinute());
-
- value = new TimeLiteral(timeValue);
- break;
- default:
- throw new RuntimeException("Unsupported type: " + evalNode.getValueType().getType().name());
- }
- exprs.push(value);
-
- return super.visitConst(o, evalNode, stack);
- }
-
- @Override
- protected EvalNode visitRowConstant(Object o, RowConstantEval evalNode, Stack<EvalNode> stack) {
- Expr[] values = new Expr[evalNode.getValues().length];
- for (int i = 0; i < evalNode.getValues().length; i++) {
- Datum datum = evalNode.getValues()[i];
- LiteralValue value;
- switch (datum.type()) {
- case BOOLEAN:
- value = new LiteralValue(datum.asChars(), LiteralValue.LiteralType.Boolean);
- break;
- case TEXT:
- value = new LiteralValue(datum.asChars(), LiteralValue.LiteralType.String);
- break;
- case INT1:
- case INT2:
- case INT4:
- value = new LiteralValue(datum.asChars(), LiteralValue.LiteralType.Unsigned_Integer);
- break;
- case INT8:
- value = new LiteralValue(datum.asChars(), LiteralValue.LiteralType.Unsigned_Large_Integer);
- break;
- case FLOAT4:
- case FLOAT8:
- value = new LiteralValue(datum.asChars(), LiteralValue.LiteralType.Unsigned_Float);
- break;
- default:
- throw new RuntimeException("Unsupported type: " + datum.type().name());
- }
- values[i] = value;
- }
- ValueListExpr expr = new ValueListExpr(values);
- exprs.push(expr);
-
- return super.visitRowConstant(o, evalNode, stack);
- }
-
- @Override
- protected EvalNode visitField(Object o, Stack<EvalNode> stack, FieldEval evalNode) {
- ColumnReferenceExpr expr = new ColumnReferenceExpr(tableName, evalNode.getColumnName());
- exprs.push(expr);
- return super.visitField(o, stack, evalNode);
- }
-
- @Override
- protected EvalNode visitBetween(Object o, BetweenPredicateEval evalNode, Stack<EvalNode> stack) {
- stack.push(evalNode);
-
- visit(o, evalNode.getPredicand(), stack);
- Expr predicand = exprs.pop();
-
- visit(o, evalNode.getBegin(), stack);
- Expr begin = exprs.pop();
-
- visit(o, evalNode.getEnd(), stack);
- Expr end = exprs.pop();
-
- Expr expr = new BetweenPredicate(evalNode.isNot(), evalNode.isSymmetric(), predicand, begin, end);
- exprs.push(expr);
-
- stack.pop();
-
- return null;
- }
-
- @Override
- protected EvalNode visitCaseWhen(Object o, CaseWhenEval evalNode, Stack<EvalNode> stack) {
- stack.push(evalNode);
-
- CaseWhenPredicate caseWhenPredicate = new CaseWhenPredicate();
-
- for (CaseWhenEval.IfThenEval ifThenEval : evalNode.getIfThenEvals()) {
- visit(o, ifThenEval.getCondition(), stack);
- Expr condition = exprs.pop();
- visit(o, ifThenEval.getResult(), stack);
- Expr result = exprs.pop();
-
- caseWhenPredicate.addWhen(condition, result);
- }
-
- if (evalNode.hasElse()) {
- visit(o, evalNode.getElse(), stack);
- Expr elseResult = exprs.pop();
- caseWhenPredicate.setElseResult(elseResult);
- }
-
- exprs.push(caseWhenPredicate);
-
- stack.pop();
-
- return null;
- }
-}
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/tajo/blob/e1c2d352/tajo-plan/src/main/java/org/apache/tajo/plan/util/PartitionFilterAlgebraVisitor.java
----------------------------------------------------------------------
diff --git a/tajo-plan/src/main/java/org/apache/tajo/plan/util/PartitionFilterAlgebraVisitor.java b/tajo-plan/src/main/java/org/apache/tajo/plan/util/PartitionFilterAlgebraVisitor.java
deleted file mode 100644
index 72fd939..0000000
--- a/tajo-plan/src/main/java/org/apache/tajo/plan/util/PartitionFilterAlgebraVisitor.java
+++ /dev/null
@@ -1,573 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-
-package org.apache.tajo.plan.util;
-
-import org.apache.tajo.algebra.*;
-import org.apache.tajo.catalog.CatalogConstants;
-import org.apache.tajo.catalog.Column;
-import org.apache.tajo.common.TajoDataTypes.*;
-import org.apache.tajo.datum.TimeDatum;
-import org.apache.tajo.exception.TajoException;
-import org.apache.tajo.exception.UnsupportedException;
-import org.apache.tajo.plan.ExprAnnotator;
-import org.apache.tajo.plan.visitor.SimpleAlgebraVisitor;
-import org.apache.tajo.util.Pair;
-import org.apache.tajo.util.TUtil;
-import org.apache.tajo.util.datetime.DateTimeUtil;
-import org.apache.tajo.util.datetime.TimeMeta;
-
-import java.sql.Date;
-import java.sql.Time;
-import java.sql.Timestamp;
-import java.util.List;
-import java.util.Stack;
-import java.util.TimeZone;
-
-/**
- * This build SQL statements for getting partitions informs on CatalogStore with algebra expressions.
- * This visitor assumes that all columns of algebra expressions are reserved for one table.
- *
- */
-public class PartitionFilterAlgebraVisitor extends SimpleAlgebraVisitor<Object, Expr> {
- private String tableAlias;
- private Column column;
- private boolean isHiveCatalog = false;
-
- private Stack<String> queries = new Stack();
- private List<Pair<Type, Object>> parameters = TUtil.newList();
-
- public String getTableAlias() {
- return tableAlias;
- }
-
- public void setTableAlias(String tableAlias) {
- this.tableAlias = tableAlias;
- }
-
- public Column getColumn() {
- return column;
- }
-
- public void setColumn(Column column) {
- this.column = column;
- }
-
- public boolean isHiveCatalog() {
- return isHiveCatalog;
- }
-
- public void setIsHiveCatalog(boolean isHiveCatalog) {
- this.isHiveCatalog = isHiveCatalog;
- }
-
- public List<Pair<Type, Object>> getParameters() {
- return parameters;
- }
-
- public void setParameters(List<Pair<Type, Object>> parameters) {
- this.parameters = parameters;
- }
-
- public void clearParameters() {
- this.parameters.clear();
- }
-
- public String getResult() {
- return queries.pop();
- }
-
- @Override
- public Expr visit(Object ctx, Stack<Expr> stack, Expr expr) throws TajoException {
- if (expr.getType() == OpType.LikePredicate) {
- return visitLikePredicate(ctx, stack, (PatternMatchPredicate) expr);
- } else if (expr.getType() == OpType.SimilarToPredicate) {
- return visitSimilarToPredicate(ctx, stack, (PatternMatchPredicate) expr);
- } else if (expr.getType() == OpType.Regexp) {
- return visitRegexpPredicate(ctx, stack, (PatternMatchPredicate) expr);
- }
- return super.visit(ctx, stack, expr);
- }
-
- @Override
- public Expr visitDateLiteral(Object ctx, Stack<Expr> stack, DateLiteral expr) throws TajoException {
- StringBuilder sb = new StringBuilder();
-
- if (!isHiveCatalog) {
- sb.append("?").append(" )");
- parameters.add(new Pair(Type.DATE, Date.valueOf(expr.toString())));
- } else {
- sb.append("\"").append(expr.toString()).append("\"");
- }
- queries.push(sb.toString());
- return expr;
- }
-
- @Override
- public Expr visitTimestampLiteral(Object ctx, Stack<Expr> stack, TimestampLiteral expr) throws TajoException {
- StringBuilder sb = new StringBuilder();
-
- if (!isHiveCatalog) {
- DateValue dateValue = expr.getDate();
- TimeValue timeValue = expr.getTime();
-
- int [] dates = ExprAnnotator.dateToIntArray(dateValue.getYears(),
- dateValue.getMonths(),
- dateValue.getDays());
- int [] times = ExprAnnotator.timeToIntArray(timeValue.getHours(),
- timeValue.getMinutes(),
- timeValue.getSeconds(),
- timeValue.getSecondsFraction());
-
- long julianTimestamp;
- if (timeValue.hasSecondsFraction()) {
- julianTimestamp = DateTimeUtil.toJulianTimestamp(dates[0], dates[1], dates[2], times[0], times[1], times[2],
- times[3] * 1000);
- } else {
- julianTimestamp = DateTimeUtil.toJulianTimestamp(dates[0], dates[1], dates[2], times[0], times[1], times[2], 0);
- }
-
- TimeMeta tm = new TimeMeta();
- DateTimeUtil.toJulianTimeMeta(julianTimestamp, tm);
-
- TimeZone tz = TimeZone.getDefault();
- DateTimeUtil.toUTCTimezone(tm, tz);
-
- sb.append("?").append(" )");
- Timestamp timestamp = new Timestamp(DateTimeUtil.julianTimeToJavaTime(DateTimeUtil.toJulianTimestamp(tm)));
- parameters.add(new Pair(Type.TIMESTAMP, timestamp));
- } else {
- sb.append("\"").append(expr.toString()).append("\"");
- }
- queries.push(sb.toString());
-
- return expr;
- }
-
- @Override
- public Expr visitTimeLiteral(Object ctx, Stack<Expr> stack, TimeLiteral expr) throws TajoException {
- StringBuilder sb = new StringBuilder();
-
- if (!isHiveCatalog) {
- TimeValue timeValue = expr.getTime();
-
- int [] times = ExprAnnotator.timeToIntArray(timeValue.getHours(),
- timeValue.getMinutes(),
- timeValue.getSeconds(),
- timeValue.getSecondsFraction());
-
- long time;
- if (timeValue.hasSecondsFraction()) {
- time = DateTimeUtil.toTime(times[0], times[1], times[2], times[3] * 1000);
- } else {
- time = DateTimeUtil.toTime(times[0], times[1], times[2], 0);
- }
- TimeDatum timeDatum = new TimeDatum(time);
- TimeMeta tm = timeDatum.asTimeMeta();
-
- TimeZone tz = TimeZone.getDefault();
- DateTimeUtil.toUTCTimezone(tm, tz);
-
- sb.append("?").append(" )");
- parameters.add(new Pair(Type.TIME, new Time(DateTimeUtil.toJavaTime(tm.hours, tm.minutes, tm.secs, tm.fsecs))));
- } else {
- sb.append("\"").append(expr.toString()).append("\"");
- }
- queries.push(sb.toString());
-
- return expr;
- }
-
- @Override
- public Expr visitLiteral(Object ctx, Stack<Expr> stack, LiteralValue expr) throws TajoException {
- StringBuilder sb = new StringBuilder();
-
- if (!isHiveCatalog) {
- sb.append("?").append(" )");
- switch (expr.getValueType()) {
- case Boolean:
- parameters.add(new Pair(Type.BOOLEAN, Boolean.valueOf(expr.getValue())));
- break;
- case Unsigned_Float:
- parameters.add(new Pair(Type.FLOAT8, Double.valueOf(expr.getValue())));
- break;
- case String:
- parameters.add(new Pair(Type.TEXT, expr.getValue()));
- break;
- default:
- parameters.add(new Pair(Type.INT8, Long.valueOf(expr.getValue())));
- break;
- }
- } else {
- switch (expr.getValueType()) {
- case String:
- sb.append("\"").append(expr.getValue()).append("\"");
- break;
- default:
- sb.append(expr.getValue());
- break;
- }
- }
- queries.push(sb.toString());
-
- return expr;
- }
-
- @Override
- public Expr visitValueListExpr(Object ctx, Stack<Expr> stack, ValueListExpr expr) throws TajoException {
- StringBuilder sb = new StringBuilder();
-
- if (!isHiveCatalog) {
- sb.append("(");
- for(int i = 0; i < expr.getValues().length; i++) {
- if (i > 0) {
- sb.append(", ");
- }
- sb.append("?");
- stack.push(expr.getValues()[i]);
- visit(ctx, stack, expr.getValues()[i]);
- stack.pop();
- }
- sb.append(")");
- sb.append(" )");
- queries.push(sb.toString());
- } else {
- throw new UnsupportedException("IN Operator");
- }
-
- return expr;
- }
-
- @Override
- public Expr visitColumnReference(Object ctx, Stack<Expr> stack, ColumnReferenceExpr expr) throws TajoException {
- StringBuilder sb = new StringBuilder();
-
- if (!isHiveCatalog) {
- sb.append("( ").append(tableAlias).append(".").append(CatalogConstants.COL_COLUMN_NAME)
- .append(" = '").append(expr.getName()).append("'")
- .append(" AND ").append(tableAlias).append(".").append(CatalogConstants.COL_PARTITION_VALUE);
- } else {
- sb.append(expr.getName());
- }
- queries.push(sb.toString());
- return expr;
- }
-
-
- @Override
- public Expr visitUnaryOperator(Object ctx, Stack<Expr> stack, UnaryOperator expr) throws TajoException {
- stack.push(expr);
- Expr child = visit(ctx, stack, expr.getChild());
- stack.pop();
-
- if (child.getType() == OpType.Literal) {
- return new NullLiteral();
- }
-
- String childSql = queries.pop();
-
- StringBuilder sb = new StringBuilder();
- if (expr.getType() == OpType.IsNullPredicate) {
- IsNullPredicate isNullPredicate = (IsNullPredicate) expr;
- sb.append(childSql);
- sb.append(" IS ");
- if (isNullPredicate.isNot()) {
- sb.append("NOT NULL");
- } else {
- sb.append("NULL");
- }
- }
-
- if (!isHiveCatalog) {
- sb.append(" )");
- }
- queries.push(sb.toString());
-
- return expr;
- }
-
- @Override
- public Expr visitBetween(Object ctx, Stack<Expr> stack, BetweenPredicate expr) throws TajoException {
- stack.push(expr);
-
- visit(ctx, stack, expr.predicand());
- String predicandSql = queries.pop();
-
- visit(ctx, stack, expr.begin());
- String beginSql= queries.pop();
- if (!isHiveCatalog && beginSql.endsWith(")")) {
- beginSql = beginSql.substring(0, beginSql.length()-1);
- }
-
- visit(ctx, stack, expr.end());
- String endSql = queries.pop();
- if (!isHiveCatalog && endSql.endsWith(")")) {
- endSql = beginSql.substring(0, endSql.length()-1);
- }
-
- StringBuilder sb = new StringBuilder();
- sb.append(predicandSql);
- sb.append(" BETWEEN ");
- sb.append(beginSql);
- sb.append(" AND ");
- sb.append(endSql);
-
- if (!isHiveCatalog) {
- sb.append(")");
- }
-
- queries.push(sb.toString());
-
- return expr;
- }
-
- @Override
- public Expr visitCaseWhen(Object ctx, Stack<Expr> stack, CaseWhenPredicate expr) throws TajoException {
- stack.push(expr);
-
- StringBuilder sb = new StringBuilder();
- sb.append("CASE ");
-
- String condition, result;
-
- for (CaseWhenPredicate.WhenExpr when : expr.getWhens()) {
- visit(ctx, stack, when.getCondition());
- condition = queries.pop();
- visit(ctx, stack, when.getResult());
- result = queries.pop();
-
- String whenSql = condition + " " + result;
- if (!isHiveCatalog && whenSql.endsWith(")")) {
- whenSql = whenSql.substring(0, whenSql.length()-1);
- }
-
- sb.append(whenSql).append(" ");
- }
-
- if (expr.hasElseResult()) {
- visit(ctx, stack, expr.getElseResult());
- String elseSql = queries.pop();
- if (!isHiveCatalog && elseSql.endsWith(")")) {
- elseSql = elseSql.substring(0, elseSql.length()-1);
- }
-
- sb.append("ELSE ").append(elseSql).append(" END");
- }
-
- if (!isHiveCatalog) {
- sb.append(")");
- }
-
- queries.push(sb.toString());
-
- stack.pop();
- return expr;
- }
-
- @Override
- public Expr visitBinaryOperator(Object ctx, Stack<Expr> stack, BinaryOperator expr) throws TajoException {
- stack.push(expr);
- Expr lhs = visit(ctx, stack, expr.getLeft());
- String leftSql = queries.pop();
- Expr rhs = visit(ctx, stack, expr.getRight());
- String rightSql = queries.pop();
- stack.pop();
-
- if (!expr.getLeft().equals(lhs)) {
- expr.setLeft(lhs);
- }
- if (!expr.getRight().equals(rhs)) {
- expr.setRight(rhs);
- }
-
- if (lhs.getType() == OpType.Literal && rhs.getType() == OpType.Literal) {
- return new NullLiteral();
- }
-
- StringBuilder sb = new StringBuilder();
- sb.append(leftSql);
- sb.append(" ").append(getOperator(expr.getType())).append(" ");
- sb.append(rightSql);
- queries.push(sb.toString());
-
- return expr;
- }
-
- private String getOperator(OpType type) {
- String operator;
- switch (type) {
- case Not:
- operator = "!";
- break;
- case And:
- operator = "AND";
- break;
- case Or:
- operator = "OR";
- break;
- case Equals:
- operator = "=";
- break;
- case IsNullPredicate:
- operator = "IS NULL";
- break;
- case NotEquals:
- operator = "<>";
- break;
- case LessThan:
- operator = "<";
- break;
- case LessThanOrEquals:
- operator = "<=";
- break;
- case GreaterThan:
- operator = ">";
- break;
- case GreaterThanOrEquals:
- operator = ">=";
- break;
- case Plus:
- operator = "+";
- break;
- case Minus:
- operator = "-";
- break;
- case Modular:
- operator = "%";
- break;
- case Multiply:
- operator = "*";
- break;
- case Divide:
- operator = "/";
- break;
- case LikePredicate:
- operator = "LIKE";
- break;
- case SimilarToPredicate:
- operator = "([.])";
- break;
- case InPredicate:
- operator = "IN";
- break;
- case Asterisk:
- operator = "*";
- break;
- //TODO: need to check more types.
- default:
- operator = type.name();
- break;
- }
-
- return operator;
- }
-
- @Override
- public Expr visitLikePredicate(Object ctx, Stack<Expr> stack, PatternMatchPredicate expr) throws TajoException {
- stack.push(expr);
-
- visit(ctx, stack, expr.getPredicand());
- String predicand = queries.pop();
- visit(ctx, stack, expr.getPattern());
- String pattern = queries.pop();
- stack.pop();
-
- if(isHiveCatalog) {
- if (pattern.startsWith("%") || pattern.endsWith("%")) {
- throw new UnsupportedException("LIKE Operator with '%'");
- }
- }
- StringBuilder sb = new StringBuilder();
- sb.append(predicand);
-
- if (expr.isNot()) {
- sb.append(" NOT ");
- }
-
- if (expr.isCaseInsensitive()) {
- sb.append(" ILIKE ");
- } else {
- sb.append(" LIKE ");
- }
-
-
- sb.append(pattern);
- queries.push(sb.toString());
-
- return expr;
- }
-
- @Override
- public Expr visitSimilarToPredicate(Object ctx, Stack<Expr> stack, PatternMatchPredicate expr) throws TajoException {
- if (isHiveCatalog) {
- throw new UnsupportedException("SIMILAR TO Operator");
- }
-
- stack.push(expr);
-
- visit(ctx, stack, expr.getPredicand());
- String predicand = queries.pop();
- visit(ctx, stack, expr.getPattern());
- String pattern = queries.pop();
- stack.pop();
-
- StringBuilder sb = new StringBuilder();
- sb.append(predicand);
-
- if (expr.isNot()) {
- sb.append(" NOT ");
- }
-
- sb.append(" SIMILAR TO ");
-
- sb.append(pattern);
- queries.push(sb.toString());
-
- return expr;
- }
-
- @Override
- public Expr visitRegexpPredicate(Object ctx, Stack<Expr> stack, PatternMatchPredicate expr) throws TajoException {
- if (isHiveCatalog) {
- throw new UnsupportedException("REGEXP Operator");
- }
-
- stack.push(expr);
-
- visit(ctx, stack, expr.getPredicand());
- String predicand = queries.pop();
- visit(ctx, stack, expr.getPattern());
- String pattern = queries.pop();
- stack.pop();
-
- StringBuilder sb = new StringBuilder();
- sb.append(predicand);
-
- if (expr.isNot()) {
- sb.append(" NOT ");
- }
- sb.append(" REGEXP ");
-
- sb.append(pattern);
- queries.push(sb.toString());
-
- return expr;
- }
-
-}
\ No newline at end of file
[2/2] tajo git commit: Revert "TAJO-1493: Make partition pruning
based on catalog informations."
Posted by bl...@apache.org.
Revert "TAJO-1493: Make partition pruning based on catalog informations."
This reverts commit b68329101b412649149f261002c53a45f2711d75.
Conflicts:
CHANGES
Project: http://git-wip-us.apache.org/repos/asf/tajo/repo
Commit: http://git-wip-us.apache.org/repos/asf/tajo/commit/e1c2d352
Tree: http://git-wip-us.apache.org/repos/asf/tajo/tree/e1c2d352
Diff: http://git-wip-us.apache.org/repos/asf/tajo/diff/e1c2d352
Branch: refs/heads/master
Commit: e1c2d352e15f67d148700fe61d78a37640341dab
Parents: b0c0a39
Author: JaeHwa Jung <bl...@apache.org>
Authored: Thu Sep 17 18:56:31 2015 +0900
Committer: JaeHwa Jung <bl...@apache.org>
Committed: Thu Sep 17 18:56:31 2015 +0900
----------------------------------------------------------------------
CHANGES | 2 -
.../tajo/catalog/AbstractCatalogClient.java | 46 +-
.../src/main/proto/CatalogProtocol.proto | 4 +-
.../org/apache/tajo/catalog/CatalogService.java | 9 +-
.../src/main/proto/CatalogProtos.proto | 15 +-
.../tajo/catalog/store/HiveCatalogStore.java | 139 +----
.../catalog/store/TestHiveCatalogStore.java | 76 +--
tajo-catalog/tajo-catalog-server/pom.xml | 8 -
.../org/apache/tajo/catalog/CatalogServer.java | 88 +--
.../tajo/catalog/store/AbstractDBStore.java | 287 +---------
.../apache/tajo/catalog/store/CatalogStore.java | 27 +-
.../org/apache/tajo/catalog/TestCatalog.java | 4 +-
.../TestCatalogAgainstCaseSensitivity.java | 2 +-
.../apache/tajo/exception/ErrorMessages.java | 1 -
.../exception/PartitionNotFoundException.java | 35 --
.../UndefinedPartitionMethodException.java | 4 +-
tajo-common/src/main/proto/errors.proto | 1 -
.../planner/TestEvalNodeToExprConverter.java | 406 -------------
.../tajo/engine/query/TestAlterTable.java | 4 +-
.../tajo/engine/query/TestTablePartitions.java | 432 +-------------
.../planner/physical/ColPartitionStoreExec.java | 12 +-
.../tajo/master/TajoMasterClientService.java | 2 +-
.../apache/tajo/plan/expr/AlgebraicUtil.java | 192 +------
.../rewrite/rules/PartitionedTableRewriter.java | 157 +----
.../tajo/plan/util/EvalNodeToExprConverter.java | 297 ----------
.../util/PartitionFilterAlgebraVisitor.java | 573 -------------------
26 files changed, 67 insertions(+), 2756 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/tajo/blob/e1c2d352/CHANGES
----------------------------------------------------------------------
diff --git a/CHANGES b/CHANGES
index c485a27..122f895 100644
--- a/CHANGES
+++ b/CHANGES
@@ -556,8 +556,6 @@ Release 0.11.0 - unreleased
TAJO-1853: Add tablespace syntax to the CREATE TABLE section of DDL page.
(jihoon)
- TAJO-1493: Make partition pruning based on catalog informations. (jaehwa)
-
TAJO-1824: Remove partition_keys table from information_schema. (jaehwa)
TAJO-1813: Allow external catalog store for unit testing. (jihoon)
http://git-wip-us.apache.org/repos/asf/tajo/blob/e1c2d352/tajo-catalog/tajo-catalog-client/src/main/java/org/apache/tajo/catalog/AbstractCatalogClient.java
----------------------------------------------------------------------
diff --git a/tajo-catalog/tajo-catalog-client/src/main/java/org/apache/tajo/catalog/AbstractCatalogClient.java b/tajo-catalog/tajo-catalog-client/src/main/java/org/apache/tajo/catalog/AbstractCatalogClient.java
index 6522bf6..1dc7a71 100644
--- a/tajo-catalog/tajo-catalog-client/src/main/java/org/apache/tajo/catalog/AbstractCatalogClient.java
+++ b/tajo-catalog/tajo-catalog-client/src/main/java/org/apache/tajo/catalog/AbstractCatalogClient.java
@@ -434,16 +434,15 @@ public abstract class AbstractCatalogClient implements CatalogService, Closeable
}
@Override
- public final List<PartitionDescProto> getAllPartitions(final String databaseName, final String tableName) throws
- UndefinedDatabaseException, UndefinedTableException, UndefinedPartitionMethodException {
+ public final List<PartitionDescProto> getPartitions(final String databaseName, final String tableName) {
try {
final BlockingInterface stub = getStub();
- final TableIdentifierProto request = buildTableIdentifier(databaseName, tableName);
+ final PartitionIdentifierProto request = PartitionIdentifierProto.newBuilder()
+ .setDatabaseName(databaseName)
+ .setTableName(tableName)
+ .build();
final GetPartitionsResponse response = stub.getPartitionsByTableName(null, request);
- throwsIfThisError(response.getState(), UndefinedDatabaseException.class);
- throwsIfThisError(response.getState(), UndefinedTableException.class);
- throwsIfThisError(response.getState(), UndefinedPartitionMethodException.class);
ensureOk(response.getState());
return response.getPartitionList();
@@ -453,41 +452,6 @@ public abstract class AbstractCatalogClient implements CatalogService, Closeable
}
@Override
- public List<PartitionDescProto> getPartitionsByAlgebra(PartitionsByAlgebraProto request) throws
- UndefinedDatabaseException, UndefinedTableException, UndefinedPartitionMethodException {
- try {
- final BlockingInterface stub = getStub();
- GetPartitionsResponse response = stub.getPartitionsByAlgebra(null, request);
-
- throwsIfThisError(response.getState(), UndefinedDatabaseException.class);
- throwsIfThisError(response.getState(), UndefinedTableException.class);
- throwsIfThisError(response.getState(), UndefinedPartitionMethodException.class);
- ensureOk(response.getState());
- return response.getPartitionList();
- } catch (ServiceException e) {
- LOG.error(e.getMessage(), e);
- return null;
- }
- }
-
- @Override
- public List<PartitionDescProto> getPartitionsByFilter(PartitionsByFilterProto request) throws
- UndefinedDatabaseException, UndefinedTableException, UndefinedPartitionMethodException {
- try {
- final BlockingInterface stub = getStub();
- GetPartitionsResponse response = stub.getPartitionsByFilter(null, request);
-
- throwsIfThisError(response.getState(), UndefinedDatabaseException.class);
- throwsIfThisError(response.getState(), UndefinedTableException.class);
- throwsIfThisError(response.getState(), UndefinedPartitionMethodException.class);
- ensureOk(response.getState());
- return response.getPartitionList();
- } catch (ServiceException e) {
- LOG.error(e.getMessage(), e);
- return null;
- }
- }
- @Override
public List<TablePartitionProto> getAllPartitions() {
try {
final BlockingInterface stub = getStub();
http://git-wip-us.apache.org/repos/asf/tajo/blob/e1c2d352/tajo-catalog/tajo-catalog-client/src/main/proto/CatalogProtocol.proto
----------------------------------------------------------------------
diff --git a/tajo-catalog/tajo-catalog-client/src/main/proto/CatalogProtocol.proto b/tajo-catalog/tajo-catalog-client/src/main/proto/CatalogProtocol.proto
index 170e2ae..8cc8e2f 100644
--- a/tajo-catalog/tajo-catalog-client/src/main/proto/CatalogProtocol.proto
+++ b/tajo-catalog/tajo-catalog-client/src/main/proto/CatalogProtocol.proto
@@ -121,11 +121,9 @@ service CatalogProtocolService {
rpc existPartitionMethod(TableIdentifierProto) returns (ReturnState);
rpc getPartitionByPartitionName(PartitionIdentifierProto) returns (GetPartitionDescResponse);
- rpc getPartitionsByTableName(TableIdentifierProto) returns (GetPartitionsResponse);
+ rpc getPartitionsByTableName(PartitionIdentifierProto) returns (GetPartitionsResponse);
rpc getAllPartitions(NullProto) returns (GetTablePartitionsResponse);
rpc addPartitions(AddPartitionsProto) returns (ReturnState);
- rpc getPartitionsByAlgebra(PartitionsByAlgebraProto) returns (GetPartitionsResponse);
- rpc getPartitionsByFilter(PartitionsByFilterProto) returns (GetPartitionsResponse);
rpc createIndex(IndexDescProto) returns (ReturnState);
rpc dropIndex(IndexNameProto) returns (ReturnState);
http://git-wip-us.apache.org/repos/asf/tajo/blob/e1c2d352/tajo-catalog/tajo-catalog-common/src/main/java/org/apache/tajo/catalog/CatalogService.java
----------------------------------------------------------------------
diff --git a/tajo-catalog/tajo-catalog-common/src/main/java/org/apache/tajo/catalog/CatalogService.java b/tajo-catalog/tajo-catalog-common/src/main/java/org/apache/tajo/catalog/CatalogService.java
index 3ac9714..b031313 100644
--- a/tajo-catalog/tajo-catalog-common/src/main/java/org/apache/tajo/catalog/CatalogService.java
+++ b/tajo-catalog/tajo-catalog-common/src/main/java/org/apache/tajo/catalog/CatalogService.java
@@ -172,14 +172,7 @@ public interface CatalogService {
throws UndefinedPartitionException, UndefinedPartitionMethodException, UndefinedDatabaseException,
UndefinedTableException;
- List<PartitionDescProto> getAllPartitions(String databaseName, String tableName) throws UndefinedDatabaseException,
- UndefinedTableException, UndefinedPartitionMethodException;
-
- List<PartitionDescProto> getPartitionsByAlgebra(PartitionsByAlgebraProto request) throws
- UndefinedDatabaseException, UndefinedTableException, UndefinedPartitionMethodException;
-
- List<PartitionDescProto> getPartitionsByFilter(PartitionsByFilterProto request) throws
- UndefinedDatabaseException, UndefinedTableException, UndefinedPartitionMethodException;
+ List<PartitionDescProto> getPartitions(String databaseName, String tableName);
List<TablePartitionProto> getAllPartitions();
http://git-wip-us.apache.org/repos/asf/tajo/blob/e1c2d352/tajo-catalog/tajo-catalog-common/src/main/proto/CatalogProtos.proto
----------------------------------------------------------------------
diff --git a/tajo-catalog/tajo-catalog-common/src/main/proto/CatalogProtos.proto b/tajo-catalog/tajo-catalog-common/src/main/proto/CatalogProtos.proto
index eb2c938..cfac82f 100644
--- a/tajo-catalog/tajo-catalog-common/src/main/proto/CatalogProtos.proto
+++ b/tajo-catalog/tajo-catalog-common/src/main/proto/CatalogProtos.proto
@@ -252,7 +252,8 @@ message PartitionDescProto {
message PartitionKeyProto {
required string columnName = 1;
- required string partitionValue = 2;
+ optional string parentColumnName = 2;
+ required string partitionValue = 3;
}
message PartitionIdentifierProto {
@@ -261,18 +262,6 @@ message PartitionIdentifierProto {
optional string partitionName = 3;
}
-message PartitionsByAlgebraProto {
- required string databaseName = 1;
- required string tableName = 2;
- required string algebra = 3;
-}
-
-message PartitionsByFilterProto {
- required string databaseName = 1;
- required string tableName = 2;
- required string filter = 3;
-}
-
message TablespaceProto {
required string spaceName = 1;
required string uri = 2;
http://git-wip-us.apache.org/repos/asf/tajo/blob/e1c2d352/tajo-catalog/tajo-catalog-drivers/tajo-hive/src/main/java/org/apache/tajo/catalog/store/HiveCatalogStore.java
----------------------------------------------------------------------
diff --git a/tajo-catalog/tajo-catalog-drivers/tajo-hive/src/main/java/org/apache/tajo/catalog/store/HiveCatalogStore.java b/tajo-catalog/tajo-catalog-drivers/tajo-hive/src/main/java/org/apache/tajo/catalog/store/HiveCatalogStore.java
index 6196b5d..e2229ba 100644
--- a/tajo-catalog/tajo-catalog-drivers/tajo-hive/src/main/java/org/apache/tajo/catalog/store/HiveCatalogStore.java
+++ b/tajo-catalog/tajo-catalog-drivers/tajo-hive/src/main/java/org/apache/tajo/catalog/store/HiveCatalogStore.java
@@ -34,9 +34,6 @@ import org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe;
import org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe;
import org.apache.tajo.BuiltinStorages;
import org.apache.tajo.TajoConstants;
-import org.apache.tajo.algebra.Expr;
-import org.apache.tajo.algebra.IsNullPredicate;
-import org.apache.tajo.algebra.JsonHelper;
import org.apache.tajo.catalog.*;
import org.apache.tajo.catalog.partition.PartitionMethodDesc;
import org.apache.tajo.catalog.proto.CatalogProtos;
@@ -45,8 +42,6 @@ import org.apache.tajo.catalog.statistics.TableStats;
import org.apache.tajo.common.TajoDataTypes;
import org.apache.tajo.conf.TajoConf;
import org.apache.tajo.exception.*;
-import org.apache.tajo.plan.expr.AlgebraicUtil;
-import org.apache.tajo.plan.util.PartitionFilterAlgebraVisitor;
import org.apache.tajo.storage.StorageConstants;
import org.apache.tajo.util.KeyValueSet;
import org.apache.tajo.util.TUtil;
@@ -698,7 +693,7 @@ public class HiveCatalogStore extends CatalogConstants implements CatalogStore {
Table table = client.getHiveClient().getTable(databaseName, tableName);
List<FieldSchema> columns = table.getSd().getCols();
columns.add(new FieldSchema(columnProto.getName(),
- HiveCatalogUtil.getHiveFieldType(columnProto.getDataType()), ""));
+ HiveCatalogUtil.getHiveFieldType(columnProto.getDataType()), ""));
client.getHiveClient().alter_table(databaseName, tableName, table);
@@ -850,135 +845,11 @@ public class HiveCatalogStore extends CatalogConstants implements CatalogStore {
}
@Override
- public List<CatalogProtos.PartitionDescProto> getAllPartitions(String databaseName, String tableName)
- throws UndefinedDatabaseException, UndefinedTableException, UndefinedPartitionMethodException {
- PartitionsByFilterProto.Builder request = PartitionsByFilterProto.newBuilder();
- request.setDatabaseName(databaseName);
- request.setTableName(tableName);
- request.setFilter("");
-
- return getPartitionsByFilter(request.build());
- }
-
- @Override
- public List<PartitionDescProto> getPartitionsByAlgebra(PartitionsByAlgebraProto request) throws
- UndefinedDatabaseException, UndefinedTableException, UndefinedPartitionMethodException {
-
- List<PartitionDescProto> list = null;
-
- try {
- String databaseName = request.getDatabaseName();
- String tableName = request.getTableName();
-
- TableDescProto tableDesc = getTable(databaseName, tableName);
- String filter = getFilter(databaseName, tableName, tableDesc.getPartition().getExpressionSchema().getFieldsList()
- , request.getAlgebra());
- list = getPartitionsByFilterFromHiveMetaStore(databaseName, tableName, filter);
- } catch (Exception se) {
- throw new TajoInternalError(se);
- }
-
- return list;
- }
-
- private String getFilter(String databaseName, String tableName, List<ColumnProto> partitionColumns
- , String json) throws TajoException {
-
- Expr[] exprs = null;
-
- if (json != null && !json.isEmpty()) {
- Expr algebra = JsonHelper.fromJson(json, Expr.class);
- exprs = AlgebraicUtil.toConjunctiveNormalFormArray(algebra);
- }
-
- PartitionFilterAlgebraVisitor visitor = new PartitionFilterAlgebraVisitor();
- visitor.setIsHiveCatalog(true);
-
- Expr[] filters = AlgebraicUtil.getAccumulatedFiltersByExpr(databaseName + "." + tableName, partitionColumns, exprs);
-
- StringBuffer sb = new StringBuffer();
-
- // Write join clause from second column to last column.
- Column target;
-
- int addedFilter = 0;
- String result;
- for (int i = 0; i < partitionColumns.size(); i++) {
- target = new Column(partitionColumns.get(i));
-
- if (!(filters[i] instanceof IsNullPredicate)) {
- visitor.setColumn(target);
- visitor.visit(null, new Stack<Expr>(), filters[i]);
- result = visitor.getResult();
-
- // If visitor build filter successfully, add filter to be used for executing hive api.
- if (result.length() > 0) {
- if (addedFilter > 0) {
- sb.append(" AND ");
- }
- sb.append(" ( ").append(result).append(" ) ");
- addedFilter++;
- }
- }
- }
-
- return sb.toString();
- }
-
-
- @Override
- public List<PartitionDescProto> getPartitionsByFilter(PartitionsByFilterProto request)
- throws UndefinedDatabaseException, UndefinedTableException, UndefinedPartitionMethodException {
- String databaseName = request.getDatabaseName();
- String tableName = request.getTableName();
-
- if (!existDatabase(databaseName)) {
- throw new UndefinedDatabaseException(tableName);
- }
-
- if (!existTable(databaseName, tableName)) {
- throw new UndefinedTableException(tableName);
- }
-
- if (!existPartitionMethod(databaseName, tableName)) {
- throw new UndefinedPartitionMethodException(tableName);
- }
-
- return getPartitionsByFilterFromHiveMetaStore(databaseName, tableName, request.getFilter());
+ public List<CatalogProtos.PartitionDescProto> getPartitions(String databaseName,
+ String tableName) {
+ throw new UnsupportedOperationException();
}
- private List<PartitionDescProto> getPartitionsByFilterFromHiveMetaStore(String databaseName, String tableName,
- String filter) {
- HiveCatalogStoreClientPool.HiveCatalogStoreClient client = null;
- List<PartitionDescProto> partitions = null;
-
- try {
- partitions = TUtil.newList();
- client = clientPool.getClient();
-
- List<Partition> hivePartitions = client.getHiveClient().listPartitionsByFilter(databaseName, tableName
- , filter, (short) -1);
-
- for (Partition hivePartition : hivePartitions) {
- CatalogProtos.PartitionDescProto.Builder builder = CatalogProtos.PartitionDescProto.newBuilder();
- builder.setPath(hivePartition.getSd().getLocation());
-
- int startIndex = hivePartition.getSd().getLocation().indexOf(tableName) + tableName.length();
- String partitionName = hivePartition.getSd().getLocation().substring(startIndex+1);
- builder.setPartitionName(partitionName);
-
- partitions.add(builder.build());
- }
- } catch (Exception e) {
- throw new TajoInternalError(e);
- } finally {
- if (client != null) {
- client.release();
- }
- }
-
- return partitions;
- }
@Override
public CatalogProtos.PartitionDescProto getPartition(String databaseName, String tableName,
@@ -1160,7 +1031,7 @@ public class HiveCatalogStore extends CatalogConstants implements CatalogStore {
// Unfortunately, hive client add_partitions doesn't run as expected. The method never read the ifNotExists
// parameter. So, if Tajo adds existing partition to Hive, it will threw AlreadyExistsException. To avoid
// above error, we need to filter existing partitions before call add_partitions.
- if (existingPartition == null) {
+ if (existingPartition != null) {
Partition partition = new Partition();
partition.setDbName(databaseName);
partition.setTableName(tableName);
http://git-wip-us.apache.org/repos/asf/tajo/blob/e1c2d352/tajo-catalog/tajo-catalog-drivers/tajo-hive/src/test/java/org/apache/tajo/catalog/store/TestHiveCatalogStore.java
----------------------------------------------------------------------
diff --git a/tajo-catalog/tajo-catalog-drivers/tajo-hive/src/test/java/org/apache/tajo/catalog/store/TestHiveCatalogStore.java b/tajo-catalog/tajo-catalog-drivers/tajo-hive/src/test/java/org/apache/tajo/catalog/store/TestHiveCatalogStore.java
index e575505..b3af179 100644
--- a/tajo-catalog/tajo-catalog-drivers/tajo-hive/src/test/java/org/apache/tajo/catalog/store/TestHiveCatalogStore.java
+++ b/tajo-catalog/tajo-catalog-drivers/tajo-hive/src/test/java/org/apache/tajo/catalog/store/TestHiveCatalogStore.java
@@ -34,7 +34,6 @@ import org.apache.tajo.conf.TajoConf;
import org.apache.tajo.storage.StorageConstants;
import org.apache.tajo.util.CommonTestingUtil;
import org.apache.tajo.util.KeyValueSet;
-import org.apache.tajo.util.TUtil;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
@@ -262,44 +261,10 @@ public class TestHiveCatalogStore {
}
testAddPartition(table1.getUri(), NATION, "n_nationkey=10/n_date=20150101");
- testAddPartition(table1.getUri(), NATION, "n_nationkey=10/n_date=20150102");
- testAddPartition(table1.getUri(), NATION, "n_nationkey=20/n_date=20150101");
testAddPartition(table1.getUri(), NATION, "n_nationkey=20/n_date=20150102");
- testAddPartition(table1.getUri(), NATION, "n_nationkey=30/n_date=20150101");
- testAddPartition(table1.getUri(), NATION, "n_nationkey=30/n_date=20150102");
-
- List<String> partitionNames = TUtil.newList();
- partitionNames.add("n_nationkey=40/n_date=20150801");
- partitionNames.add("n_nationkey=40/n_date=20150802");
- partitionNames.add("n_nationkey=50/n_date=20150801");
- partitionNames.add("n_nationkey=50/n_date=20150802");
- testAddPartitions(table1.getUri(), NATION, partitionNames);
-
- CatalogProtos.PartitionsByFilterProto.Builder FilterRequest = CatalogProtos
- .PartitionsByFilterProto.newBuilder();
-
- FilterRequest.setDatabaseName(DB_NAME);
- FilterRequest.setTableName(NATION);
- FilterRequest.setFilter("n_nationkey = 10 or n_nationkey = 20");
-
- List<CatalogProtos.PartitionDescProto> tablePartitions = store.getPartitionsByFilter(FilterRequest.build());
- assertEquals(tablePartitions.size(), 4);
-
- FilterRequest = CatalogProtos.PartitionsByFilterProto.newBuilder();
- FilterRequest.setDatabaseName(DB_NAME);
- FilterRequest.setTableName(NATION);
-
- FilterRequest.setFilter("n_nationkey = 10 and n_date = \"20150101\"");
-
- tablePartitions = store.getPartitionsByFilter(FilterRequest.build());
- assertEquals(tablePartitions.size(), 1);
testDropPartition(NATION, "n_nationkey=10/n_date=20150101");
- testDropPartition(NATION, "n_nationkey=10/n_date=20150102");
- testDropPartition(NATION, "n_nationkey=20/n_date=20150101");
testDropPartition(NATION, "n_nationkey=20/n_date=20150102");
- testDropPartition(NATION, "n_nationkey=30/n_date=20150101");
- testDropPartition(NATION, "n_nationkey=30/n_date=20150102");
CatalogProtos.PartitionDescProto partition = store.getPartition(DB_NAME, NATION, "n_nationkey=10/n_date=20150101");
assertNull(partition);
@@ -351,45 +316,6 @@ public class TestHiveCatalogStore {
}
}
- private void testAddPartitions(URI uri, String tableName, List<String> partitionNames) throws Exception {
- List<CatalogProtos.PartitionDescProto> partitions = TUtil.newList();
- for (String partitionName : partitionNames) {
- CatalogProtos.PartitionDescProto.Builder builder = CatalogProtos.PartitionDescProto.newBuilder();
- builder.setPartitionName(partitionName);
- Path path = new Path(uri.getPath(), partitionName);
- builder.setPath(path.toString());
-
- List<PartitionKeyProto> partitionKeyList = new ArrayList<PartitionKeyProto>();
- String[] split = partitionName.split("/");
- for(int i = 0; i < split.length; i++) {
- String[] eachPartitionName = split[i].split("=");
-
- PartitionKeyProto.Builder keyBuilder = PartitionKeyProto.newBuilder();
- keyBuilder.setColumnName(eachPartitionName[0]);
- keyBuilder.setPartitionValue(eachPartitionName[1]);
- partitionKeyList.add(keyBuilder.build());
- }
- builder.addAllPartitionKeys(partitionKeyList);
- partitions.add(builder.build());
- }
-
- store.addPartitions(DB_NAME, tableName, partitions, true);
-
- for (String partitionName : partitionNames) {
- CatalogProtos.PartitionDescProto resultDesc = store.getPartition(DB_NAME, NATION, partitionName);
- assertNotNull(resultDesc);
- assertEquals(resultDesc.getPartitionName(), partitionName);
- assertEquals(resultDesc.getPath(), uri.toString() + "/" + partitionName);
- assertEquals(resultDesc.getPartitionKeysCount(), 2);
-
- String[] split = partitionName.split("/");
- for (int i = 0; i < resultDesc.getPartitionKeysCount(); i++) {
- CatalogProtos.PartitionKeyProto keyProto = resultDesc.getPartitionKeys(i);
- String[] eachName = split[i].split("=");
- assertEquals(keyProto.getPartitionValue(), eachName[1]);
- }
- }
- }
private void testDropPartition(String tableName, String partitionName) throws Exception {
AlterTableDesc alterTableDesc = new AlterTableDesc();
@@ -479,7 +405,7 @@ public class TestHiveCatalogStore {
}
assertEquals(StorageConstants.DEFAULT_BINARY_SERDE,
- table1.getMeta().getOption(StorageConstants.SEQUENCEFILE_SERDE));
+ table1.getMeta().getOption(StorageConstants.SEQUENCEFILE_SERDE));
store.dropTable(DB_NAME, REGION);
}
http://git-wip-us.apache.org/repos/asf/tajo/blob/e1c2d352/tajo-catalog/tajo-catalog-server/pom.xml
----------------------------------------------------------------------
diff --git a/tajo-catalog/tajo-catalog-server/pom.xml b/tajo-catalog/tajo-catalog-server/pom.xml
index f00f8d3..b47b599 100644
--- a/tajo-catalog/tajo-catalog-server/pom.xml
+++ b/tajo-catalog/tajo-catalog-server/pom.xml
@@ -148,14 +148,6 @@
<artifactId>tajo-rpc-protobuf</artifactId>
</dependency>
<dependency>
- <groupId>org.apache.tajo</groupId>
- <artifactId>tajo-algebra</artifactId>
- </dependency>
- <dependency>
- <groupId>org.apache.tajo</groupId>
- <artifactId>tajo-plan</artifactId>
- </dependency>
- <dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-common</artifactId>
<scope>provided</scope>
http://git-wip-us.apache.org/repos/asf/tajo/blob/e1c2d352/tajo-catalog/tajo-catalog-server/src/main/java/org/apache/tajo/catalog/CatalogServer.java
----------------------------------------------------------------------
diff --git a/tajo-catalog/tajo-catalog-server/src/main/java/org/apache/tajo/catalog/CatalogServer.java b/tajo-catalog/tajo-catalog-server/src/main/java/org/apache/tajo/catalog/CatalogServer.java
index c8d4045..dff292f 100644
--- a/tajo-catalog/tajo-catalog-server/src/main/java/org/apache/tajo/catalog/CatalogServer.java
+++ b/tajo-catalog/tajo-catalog-server/src/main/java/org/apache/tajo/catalog/CatalogServer.java
@@ -962,7 +962,7 @@ public class CatalogServer extends AbstractService {
}
@Override
- public GetPartitionsResponse getPartitionsByTableName(RpcController controller, TableIdentifierProto request)
+ public GetPartitionsResponse getPartitionsByTableName(RpcController controller, PartitionIdentifierProto request)
throws ServiceException {
String dbName = request.getDatabaseName();
String tbName = request.getTableName();
@@ -985,7 +985,7 @@ public class CatalogServer extends AbstractService {
rlock.lock();
try {
- List<PartitionDescProto> partitions = store.getAllPartitions(dbName, tbName);
+ List<PartitionDescProto> partitions = store.getPartitions(dbName, tbName);
GetPartitionsResponse.Builder builder = GetPartitionsResponse.newBuilder();
for (PartitionDescProto partition : partitions) {
@@ -1032,90 +1032,6 @@ public class CatalogServer extends AbstractService {
}
@Override
- public GetPartitionsResponse getPartitionsByAlgebra(RpcController controller,
- PartitionsByAlgebraProto request) throws ServiceException {
- String dbName = request.getDatabaseName();
- String tbName = request.getTableName();
-
- try {
- // linked meta data do not support partition.
- // So, the request that wants to get partitions in this db will be failed.
- if (linkedMetadataManager.existsDatabase(dbName)) {
- return GetPartitionsResponse.newBuilder().setState(errUndefinedPartitionMethod(tbName)).build();
- }
- } catch (Throwable t) {
- printStackTraceIfError(LOG, t);
- return GetPartitionsResponse.newBuilder()
- .setState(returnError(t))
- .build();
- }
-
- if (metaDictionary.isSystemDatabase(dbName)) {
- return GetPartitionsResponse.newBuilder().setState(errUndefinedPartitionMethod(tbName)).build();
- }
-
- rlock.lock();
- try {
- GetPartitionsResponse.Builder builder = GetPartitionsResponse.newBuilder();
- List<PartitionDescProto> partitions = store.getPartitionsByAlgebra(request);
- builder.addAllPartition(partitions);
- builder.setState(OK);
- return builder.build();
- } catch (Throwable t) {
- printStackTraceIfError(LOG, t);
-
- return GetPartitionsResponse.newBuilder()
- .setState(returnError(t))
- .build();
-
- } finally {
- rlock.unlock();
- }
- }
-
- @Override
- public GetPartitionsResponse getPartitionsByFilter(RpcController controller,
- PartitionsByFilterProto request) throws ServiceException {
- String dbName = request.getDatabaseName();
- String tbName = request.getTableName();
-
- try {
- // linked meta data do not support partition.
- // So, the request that wants to get partitions in this db will be failed.
- if (linkedMetadataManager.existsDatabase(dbName)) {
- return GetPartitionsResponse.newBuilder().setState(errUndefinedPartitionMethod(tbName)).build();
- }
- } catch (Throwable t) {
- printStackTraceIfError(LOG, t);
- return GetPartitionsResponse.newBuilder()
- .setState(returnError(t))
- .build();
- }
-
- if (metaDictionary.isSystemDatabase(dbName)) {
- return GetPartitionsResponse.newBuilder().setState(errUndefinedPartitionMethod(tbName)).build();
- }
-
- rlock.lock();
- try {
- GetPartitionsResponse.Builder builder = GetPartitionsResponse.newBuilder();
- List<PartitionDescProto> partitions = store.getPartitionsByFilter(request);
- builder.addAllPartition(partitions);
- builder.setState(OK);
- return builder.build();
- } catch (Throwable t) {
- printStackTraceIfError(LOG, t);
-
- return GetPartitionsResponse.newBuilder()
- .setState(returnError(t))
- .build();
-
- } finally {
- rlock.unlock();
- }
- }
-
- @Override
public ReturnState addPartitions(RpcController controller, AddPartitionsProto request) {
TableIdentifierProto identifier = request.getTableIdentifier();
http://git-wip-us.apache.org/repos/asf/tajo/blob/e1c2d352/tajo-catalog/tajo-catalog-server/src/main/java/org/apache/tajo/catalog/store/AbstractDBStore.java
----------------------------------------------------------------------
diff --git a/tajo-catalog/tajo-catalog-server/src/main/java/org/apache/tajo/catalog/store/AbstractDBStore.java b/tajo-catalog/tajo-catalog-server/src/main/java/org/apache/tajo/catalog/store/AbstractDBStore.java
index b4f02e6..0b1b120 100644
--- a/tajo-catalog/tajo-catalog-server/src/main/java/org/apache/tajo/catalog/store/AbstractDBStore.java
+++ b/tajo-catalog/tajo-catalog-server/src/main/java/org/apache/tajo/catalog/store/AbstractDBStore.java
@@ -25,18 +25,14 @@ import com.google.protobuf.InvalidProtocolBufferException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
-import org.apache.tajo.algebra.Expr;
-import org.apache.tajo.algebra.JsonHelper;
import org.apache.tajo.annotation.Nullable;
import org.apache.tajo.catalog.*;
import org.apache.tajo.catalog.proto.CatalogProtos;
import org.apache.tajo.catalog.proto.CatalogProtos.*;
import org.apache.tajo.common.TajoDataTypes;
-import org.apache.tajo.common.TajoDataTypes.*;
+import org.apache.tajo.common.TajoDataTypes.Type;
import org.apache.tajo.conf.TajoConf;
import org.apache.tajo.exception.*;
-import org.apache.tajo.plan.expr.*;
-import org.apache.tajo.plan.util.PartitionFilterAlgebraVisitor;
import org.apache.tajo.util.FileUtil;
import org.apache.tajo.util.Pair;
import org.apache.tajo.util.TUtil;
@@ -44,7 +40,6 @@ import org.apache.tajo.util.TUtil;
import java.io.IOException;
import java.net.URI;
import java.sql.*;
-import java.sql.Date;
import java.util.*;
import static org.apache.tajo.catalog.proto.CatalogProtos.AlterTablespaceProto.AlterTablespaceCommand;
@@ -2189,9 +2184,8 @@ public abstract class AbstractDBStore extends CatalogConstants implements Catalo
}
@Override
- public List<PartitionDescProto> getAllPartitions(String databaseName, String tableName)
- throws UndefinedDatabaseException, UndefinedTableException, UndefinedPartitionMethodException,
- UndefinedPartitionException{
+ public List<PartitionDescProto> getPartitions(String databaseName, String tableName)
+ throws UndefinedDatabaseException, UndefinedTableException, UndefinedPartitionMethodException {
Connection conn = null;
ResultSet res = null;
@@ -2231,252 +2225,6 @@ public abstract class AbstractDBStore extends CatalogConstants implements Catalo
return partitions;
}
- /**
- * Check if list of partitions exist on catalog.
- *
- *
- * @param databaseId
- * @param tableId
- * @return
- */
- public boolean existPartitionsOnCatalog(int tableId) {
- Connection conn = null;
- ResultSet res = null;
- PreparedStatement pstmt = null;
- boolean result = false;
-
- try {
- String sql = "SELECT COUNT(*) CNT FROM "
- + TB_PARTTIONS +" WHERE " + COL_TABLES_PK + " = ? ";
-
- if (LOG.isDebugEnabled()) {
- LOG.debug(sql);
- }
-
- conn = getConnection();
- pstmt = conn.prepareStatement(sql);
- pstmt.setInt(1, tableId);
- res = pstmt.executeQuery();
-
- if (res.next()) {
- if (res.getInt("CNT") > 0) {
- result = true;
- }
- }
- } catch (SQLException se) {
- throw new TajoInternalError(se);
- } finally {
- CatalogUtil.closeQuietly(pstmt, res);
- }
- return result;
- }
-
- @Override
- public List<PartitionDescProto> getPartitionsByFilter(PartitionsByFilterProto request) {
- throw new TajoRuntimeException(new UnsupportedException());
- }
-
- @Override
- public List<PartitionDescProto> getPartitionsByAlgebra(PartitionsByAlgebraProto request) throws
- UndefinedDatabaseException, UndefinedTableException, UndefinedPartitionMethodException {
- Connection conn = null;
- PreparedStatement pstmt = null;
- ResultSet res = null;
- int currentIndex = 1;
- String selectStatement = null;
-
- List<PartitionDescProto> partitions = TUtil.newList();
- List<PartitionFilterSet> filterSets = TUtil.newList();
-
- try {
- int databaseId = getDatabaseId(request.getDatabaseName());
- int tableId = getTableId(databaseId, request.getDatabaseName(), request.getTableName());
- if (!existPartitionMethod(request.getDatabaseName(), request.getTableName())) {
- throw new UndefinedPartitionMethodException(request.getTableName());
- }
-
- if (!existPartitionsOnCatalog(tableId)) {
- throw new PartitionNotFoundException(request.getTableName());
- }
-
- TableDescProto tableDesc = getTable(request.getDatabaseName(), request.getTableName());
-
- selectStatement = getSelectStatementForPartitions(tableDesc.getTableName(), tableDesc.getPartition()
- .getExpressionSchema().getFieldsList(), request.getAlgebra(), filterSets);
-
- conn = getConnection();
- pstmt = conn.prepareStatement(selectStatement);
-
- // Set table id by force because first parameter of all direct sql is table id
- pstmt.setInt(currentIndex, tableId);
- currentIndex++;
-
- for (PartitionFilterSet filter : filterSets) {
- // Set table id by force because all filters have table id as first parameter.
- pstmt.setInt(currentIndex, tableId);
- currentIndex++;
-
- for (Pair<Type, Object> parameter : filter.getParameters()) {
- switch (parameter.getFirst()) {
- case BOOLEAN:
- pstmt.setBoolean(currentIndex, (Boolean)parameter.getSecond());
- break;
- case INT8:
- pstmt.setLong(currentIndex, (Long) parameter.getSecond());
- break;
- case FLOAT8:
- pstmt.setDouble(currentIndex, (Double) parameter.getSecond());
- break;
- case DATE:
- pstmt.setDate(currentIndex, (Date) parameter.getSecond());
- break;
- case TIMESTAMP:
- pstmt.setTimestamp(currentIndex, (Timestamp) parameter.getSecond());
- break;
- case TIME:
- pstmt.setTime(currentIndex, (Time) parameter.getSecond());
- break;
- default:
- pstmt.setString(currentIndex, (String) parameter.getSecond());
- break;
- }
- currentIndex++;
- }
- }
-
- res = pstmt.executeQuery();
-
- while (res.next()) {
- PartitionDescProto.Builder builder = PartitionDescProto.newBuilder();
-
- builder.setId(res.getInt(COL_PARTITIONS_PK));
- builder.setPartitionName(res.getString("PARTITION_NAME"));
- builder.setPath(res.getString("PATH"));
-
- partitions.add(builder.build());
- }
- } catch (TajoException se) {
- throw new TajoInternalError(se);
- } catch (SQLException se) {
- throw new TajoInternalError(se);
- } finally {
- CatalogUtil.closeQuietly(pstmt, res);
- }
-
- return partitions;
- }
-
- /**
- * Create a select statement and parameters for querying partitions and partition keys in CatalogStore.
- *
- * For example, consider you have a partitioned table for three columns (i.e., col1, col2, col3).
- * Assume that a user gives a condition WHERE (col1 ='1' or col1 = '100') and col3 > 20.
- * There is no filter condition corresponding to col2.
- *
- * Then, the sql would be generated as following:
- *
- * SELECT A.PARTITION_ID, A.PARTITION_NAME, A.PATH FROM PARTITIONS A
- * WHERE A.TID = ?
- * AND A.PARTITION_ID IN (
- * SELECT T1.PARTITION_ID FROM PARTITION_KEYS T1
- * JOIN PARTITION_KEYS T2 ON T1.TID=T2.TID AND T1.PARTITION_ID = T2.PARTITION_ID AND T2.TID = ?
- * AND ( T2.COLUMN_NAME = 'col2' AND T2.PARTITION_VALUE IS NOT NULL )
- * JOIN PARTITION_KEYS T3 ON T1.TID=T3.TID AND T1.PARTITION_ID = T3.PARTITION_ID AND T3.TID = ?
- * AND ( T3.COLUMN_NAME = 'col3' AND T3.PARTITION_VALUE > ? )
- * WHERE T1.TID = ? AND ( T1.COLUMN_NAME = 'col1' AND T1.PARTITION_VALUE = ? )
- * OR ( T1.COLUMN_NAME = 'col1' AND T1.PARTITION_VALUE = ? )
- )
- *
- * @param partitionColumns
- * @param json
- * @param filterSets
- * @return
- * @throws TajoException
- * @throws SQLException
- */
- private String getSelectStatementForPartitions(String tableName, List<ColumnProto> partitionColumns, String json,
- List<PartitionFilterSet> filterSets) throws TajoException, SQLException {
-
- Expr[] exprs = null;
-
- if (json != null && !json.isEmpty()) {
- Expr algebra = JsonHelper.fromJson(json, Expr.class);
- exprs = AlgebraicUtil.toConjunctiveNormalFormArray(algebra);
- }
-
- // Write table alias for all levels
- String tableAlias;
-
- PartitionFilterAlgebraVisitor visitor = new PartitionFilterAlgebraVisitor();
- visitor.setIsHiveCatalog(false);
-
- Expr[] filters = AlgebraicUtil.getAccumulatedFiltersByExpr(tableName, partitionColumns, exprs);
-
- StringBuffer sb = new StringBuffer();
- sb.append("\n SELECT A.").append(CatalogConstants.COL_PARTITIONS_PK)
- .append(", A.PARTITION_NAME, A.PATH FROM ").append(CatalogConstants.TB_PARTTIONS).append(" A ")
- .append("\n WHERE A.").append(CatalogConstants.COL_TABLES_PK).append(" = ? ")
- .append("\n AND A.").append(CatalogConstants.COL_PARTITIONS_PK).append(" IN (")
- .append("\n SELECT T1.").append(CatalogConstants.COL_PARTITIONS_PK)
- .append(" FROM ").append(CatalogConstants.TB_PARTTION_KEYS).append(" T1 ");
-
- // Write join clause from second column to last column.
- Column target;
-
- for (int i = 1; i < partitionColumns.size(); i++) {
- target = new Column(partitionColumns.get(i));
- tableAlias = "T" + (i+1);
-
- visitor.setColumn(target);
- visitor.setTableAlias(tableAlias);
- visitor.visit(null, new Stack<Expr>(), filters[i]);
-
- sb.append("\n JOIN ").append(CatalogConstants.TB_PARTTION_KEYS).append(" ").append(tableAlias)
- .append(" ON T1.").append(CatalogConstants.COL_TABLES_PK).append("=")
- .append(tableAlias).append(".").append(CatalogConstants.COL_TABLES_PK)
- .append(" AND T1.").append(CatalogConstants.COL_PARTITIONS_PK)
- .append(" = ").append(tableAlias).append(".").append(CatalogConstants.COL_PARTITIONS_PK)
- .append(" AND ").append(tableAlias).append(".").append(CatalogConstants.COL_TABLES_PK).append(" = ? AND ");
- sb.append(visitor.getResult());
-
- // Set parameters for executing PreparedStatement
- PartitionFilterSet filterSet = new PartitionFilterSet();
- filterSet.setColumnName(target.getSimpleName());
-
- List<Pair<Type, Object>> list = TUtil.newList();
- list.addAll(visitor.getParameters());
- filterSet.addParameters(list);
-
- filterSets.add(filterSet);
- visitor.clearParameters();
- }
-
- // Write where clause for first column
- target = new Column(partitionColumns.get(0));
- tableAlias = "T1";
- visitor.setColumn(target);
- visitor.setTableAlias(tableAlias);
- visitor.visit(null, new Stack<Expr>(), filters[0]);
-
- sb.append("\n WHERE T1.").append(CatalogConstants.COL_TABLES_PK).append(" = ? AND ");
- sb.append(visitor.getResult())
- .append("\n )");
- sb.append("\n ORDER BY A.PARTITION_NAME");
-
- // Set parameters for executing PreparedStatement
- PartitionFilterSet filterSet = new PartitionFilterSet();
- filterSet.setColumnName(target.getSimpleName());
-
- List<Pair<Type, Object>> list = TUtil.newList();
- list.addAll(visitor.getParameters());
- filterSet.addParameters(list);
-
- filterSets.add(filterSet);
-
- return sb.toString();
- }
-
-
@Override
public List<TablePartitionProto> getAllPartitions() {
Connection conn = null;
@@ -3196,33 +2944,4 @@ public abstract class AbstractDBStore extends CatalogConstants implements Catalo
return exist;
}
-
- class PartitionFilterSet {
- private String columnName;
- private List<Pair<Type, Object>> parameters;
-
- public PartitionFilterSet() {
- parameters = TUtil.newList();
- }
-
- public String getColumnName() {
- return columnName;
- }
-
- public void setColumnName(String columnName) {
- this.columnName = columnName;
- }
-
- public List<Pair<Type, Object>> getParameters() {
- return parameters;
- }
-
- public void setParameters(List<Pair<Type, Object>> parameters) {
- this.parameters = parameters;
- }
-
- public void addParameters(List<Pair<Type, Object>> parameters) {
- this.parameters.addAll(parameters);
- }
- }
}
http://git-wip-us.apache.org/repos/asf/tajo/blob/e1c2d352/tajo-catalog/tajo-catalog-server/src/main/java/org/apache/tajo/catalog/store/CatalogStore.java
----------------------------------------------------------------------
diff --git a/tajo-catalog/tajo-catalog-server/src/main/java/org/apache/tajo/catalog/store/CatalogStore.java b/tajo-catalog/tajo-catalog-server/src/main/java/org/apache/tajo/catalog/store/CatalogStore.java
index 7582a44..a067a53 100644
--- a/tajo-catalog/tajo-catalog-server/src/main/java/org/apache/tajo/catalog/store/CatalogStore.java
+++ b/tajo-catalog/tajo-catalog-server/src/main/java/org/apache/tajo/catalog/store/CatalogStore.java
@@ -97,30 +97,13 @@ public interface CatalogStore extends Closeable {
* @return
* @throws TajoException
*/
- List<CatalogProtos.PartitionDescProto> getAllPartitions(String databaseName, String tableName) throws
- UndefinedDatabaseException, UndefinedTableException, UndefinedPartitionMethodException, UndefinedPartitionException;
+ List<CatalogProtos.PartitionDescProto> getPartitions(String databaseName, String tableName) throws
+ UndefinedDatabaseException, UndefinedTableException, UndefinedPartitionMethodException;
CatalogProtos.PartitionDescProto getPartition(String databaseName, String tableName,
- String partitionName) throws UndefinedDatabaseException,
- UndefinedTableException, UndefinedPartitionMethodException, UndefinedPartitionException;
-
- /**
- * PartitionedTableRewriter take a look into partition directories for rewriting filter conditions. But if there
- * are lots of sub directories on HDFS, such as, more than 10,000 directories,
- * it might be cause overload to NameNode. Thus, CatalogStore need to provide partition directories for specified
- * filter conditions. This scan right partition directories on CatalogStore with where clause.
- *
- * @param request contains database name, table name, algebra expressions, parameter for executing PrepareStatement
- * @return list of TablePartitionProto
- * @throws UndefinedDatabaseException, UndefinedTableException, UndefinedPartitionMethodException
- */
- List<PartitionDescProto> getPartitionsByAlgebra(PartitionsByAlgebraProto request) throws
- UndefinedDatabaseException, UndefinedTableException, UndefinedPartitionMethodException,
- UndefinedOperatorException;
-
- List<PartitionDescProto> getPartitionsByFilter(PartitionsByFilterProto request) throws
- UndefinedDatabaseException, UndefinedTableException, UndefinedPartitionMethodException,
- UndefinedOperatorException;
+ String partitionName)
+ throws UndefinedDatabaseException, UndefinedTableException, UndefinedPartitionException,
+ UndefinedPartitionMethodException;
List<TablePartitionProto> getAllPartitions();
http://git-wip-us.apache.org/repos/asf/tajo/blob/e1c2d352/tajo-catalog/tajo-catalog-server/src/test/java/org/apache/tajo/catalog/TestCatalog.java
----------------------------------------------------------------------
diff --git a/tajo-catalog/tajo-catalog-server/src/test/java/org/apache/tajo/catalog/TestCatalog.java b/tajo-catalog/tajo-catalog-server/src/test/java/org/apache/tajo/catalog/TestCatalog.java
index 6206a1f..8720105 100644
--- a/tajo-catalog/tajo-catalog-server/src/test/java/org/apache/tajo/catalog/TestCatalog.java
+++ b/tajo-catalog/tajo-catalog-server/src/test/java/org/apache/tajo/catalog/TestCatalog.java
@@ -816,14 +816,14 @@ public class TestCatalog {
testAddPartition(tableName, "id=10/name=aaa");
testAddPartition(tableName, "id=20/name=bbb");
- List<CatalogProtos.PartitionDescProto> partitions = catalog.getAllPartitions(DEFAULT_DATABASE_NAME, "addedtable");
+ List<CatalogProtos.PartitionDescProto> partitions = catalog.getPartitions(DEFAULT_DATABASE_NAME, "addedtable");
assertNotNull(partitions);
assertEquals(partitions.size(), 2);
testDropPartition(tableName, "id=10/name=aaa");
testDropPartition(tableName, "id=20/name=bbb");
- partitions = catalog.getAllPartitions(DEFAULT_DATABASE_NAME, "addedtable");
+ partitions = catalog.getPartitions(DEFAULT_DATABASE_NAME, "addedtable");
assertNotNull(partitions);
assertEquals(partitions.size(), 0);
http://git-wip-us.apache.org/repos/asf/tajo/blob/e1c2d352/tajo-catalog/tajo-catalog-server/src/test/java/org/apache/tajo/catalog/TestCatalogAgainstCaseSensitivity.java
----------------------------------------------------------------------
diff --git a/tajo-catalog/tajo-catalog-server/src/test/java/org/apache/tajo/catalog/TestCatalogAgainstCaseSensitivity.java b/tajo-catalog/tajo-catalog-server/src/test/java/org/apache/tajo/catalog/TestCatalogAgainstCaseSensitivity.java
index 6749bc8..bfff6b4 100644
--- a/tajo-catalog/tajo-catalog-server/src/test/java/org/apache/tajo/catalog/TestCatalogAgainstCaseSensitivity.java
+++ b/tajo-catalog/tajo-catalog-server/src/test/java/org/apache/tajo/catalog/TestCatalogAgainstCaseSensitivity.java
@@ -197,7 +197,7 @@ public class TestCatalogAgainstCaseSensitivity {
// Test get partitions of a table
//////////////////////////////////////////////////////////////////////////////
- List<PartitionDescProto> partitionDescs = catalog.getAllPartitions("TestDatabase1", "TestPartition1");
+ List<PartitionDescProto> partitionDescs = catalog.getPartitions("TestDatabase1", "TestPartition1");
assertEquals(2, partitionDescs.size());
Map<String, PartitionDescProto> tablePartitionMap = new HashMap<>();
for (PartitionDescProto eachPartition : partitionDescs) {
http://git-wip-us.apache.org/repos/asf/tajo/blob/e1c2d352/tajo-common/src/main/java/org/apache/tajo/exception/ErrorMessages.java
----------------------------------------------------------------------
diff --git a/tajo-common/src/main/java/org/apache/tajo/exception/ErrorMessages.java b/tajo-common/src/main/java/org/apache/tajo/exception/ErrorMessages.java
index 456e275..9649644 100644
--- a/tajo-common/src/main/java/org/apache/tajo/exception/ErrorMessages.java
+++ b/tajo-common/src/main/java/org/apache/tajo/exception/ErrorMessages.java
@@ -67,7 +67,6 @@ public class ErrorMessages {
ADD_MESSAGE(UNDEFINED_FUNCTION, "function does not exist: %s", 1);
ADD_MESSAGE(UNDEFINED_PARTITION_METHOD, "table '%s' is not a partitioned table", 1);
ADD_MESSAGE(UNDEFINED_PARTITION, "partition '%s' does not exist", 1);
- ADD_MESSAGE(PARTITION_NOT_FOUND, "there is no partitions in '%s' table", 1);
ADD_MESSAGE(UNDEFINED_PARTITION_KEY, "'%s' column is not a partition key", 1);
ADD_MESSAGE(UNDEFINED_OPERATOR, "operator does not exist: '%s'", 1);
ADD_MESSAGE(UNDEFINED_INDEX_FOR_TABLE, "index ''%s' does not exist", 1);
http://git-wip-us.apache.org/repos/asf/tajo/blob/e1c2d352/tajo-common/src/main/java/org/apache/tajo/exception/PartitionNotFoundException.java
----------------------------------------------------------------------
diff --git a/tajo-common/src/main/java/org/apache/tajo/exception/PartitionNotFoundException.java b/tajo-common/src/main/java/org/apache/tajo/exception/PartitionNotFoundException.java
deleted file mode 100644
index 06de9f5..0000000
--- a/tajo-common/src/main/java/org/apache/tajo/exception/PartitionNotFoundException.java
+++ /dev/null
@@ -1,35 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.tajo.exception;
-
-import org.apache.tajo.error.Errors.ResultCode;
-import org.apache.tajo.rpc.protocolrecords.PrimitiveProtos.ReturnState;
-
-public class PartitionNotFoundException extends TajoException {
-
- private static final long serialVersionUID = 277182608283894939L;
-
- public PartitionNotFoundException(ReturnState state) {
- super(state);
- }
-
- public PartitionNotFoundException(String tableName) {
- super(ResultCode.PARTITION_NOT_FOUND, tableName);
- }
-}
http://git-wip-us.apache.org/repos/asf/tajo/blob/e1c2d352/tajo-common/src/main/java/org/apache/tajo/exception/UndefinedPartitionMethodException.java
----------------------------------------------------------------------
diff --git a/tajo-common/src/main/java/org/apache/tajo/exception/UndefinedPartitionMethodException.java b/tajo-common/src/main/java/org/apache/tajo/exception/UndefinedPartitionMethodException.java
index ca61c70..459269c 100644
--- a/tajo-common/src/main/java/org/apache/tajo/exception/UndefinedPartitionMethodException.java
+++ b/tajo-common/src/main/java/org/apache/tajo/exception/UndefinedPartitionMethodException.java
@@ -29,7 +29,7 @@ public class UndefinedPartitionMethodException extends TajoException {
super(state);
}
- public UndefinedPartitionMethodException(String tableName) {
- super(ResultCode.UNDEFINED_PARTITION_METHOD, tableName);
+ public UndefinedPartitionMethodException(String partitionName) {
+ super(ResultCode.UNDEFINED_PARTITION_METHOD, partitionName);
}
}
http://git-wip-us.apache.org/repos/asf/tajo/blob/e1c2d352/tajo-common/src/main/proto/errors.proto
----------------------------------------------------------------------
diff --git a/tajo-common/src/main/proto/errors.proto b/tajo-common/src/main/proto/errors.proto
index 264ddef..6a1780b 100644
--- a/tajo-common/src/main/proto/errors.proto
+++ b/tajo-common/src/main/proto/errors.proto
@@ -113,7 +113,6 @@ enum ResultCode {
UNDEFINED_PARTITION_METHOD = 521; // ?
UNDEFINED_OPERATOR = 522; // SQLState: 42883 (=UNDEFINED_FUNCTION)
UNDEFINED_PARTITION_KEY = 523; // ?
- PARTITION_NOT_FOUND = 524; // ?
DUPLICATE_TABLESPACE = 531;
DUPLICATE_DATABASE = 532; // SQLState: 42P04
http://git-wip-us.apache.org/repos/asf/tajo/blob/e1c2d352/tajo-core-tests/src/test/java/org/apache/tajo/engine/planner/TestEvalNodeToExprConverter.java
----------------------------------------------------------------------
diff --git a/tajo-core-tests/src/test/java/org/apache/tajo/engine/planner/TestEvalNodeToExprConverter.java b/tajo-core-tests/src/test/java/org/apache/tajo/engine/planner/TestEvalNodeToExprConverter.java
deleted file mode 100644
index de2fa15..0000000
--- a/tajo-core-tests/src/test/java/org/apache/tajo/engine/planner/TestEvalNodeToExprConverter.java
+++ /dev/null
@@ -1,406 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.tajo.engine.planner;
-
-import org.apache.tajo.LocalTajoTestingUtility;
-import org.apache.tajo.QueryVars;
-import org.apache.tajo.TajoTestingCluster;
-import org.apache.tajo.algebra.*;
-import org.apache.tajo.benchmark.TPCH;
-import org.apache.tajo.catalog.*;
-import org.apache.tajo.engine.function.FunctionLoader;
-import org.apache.tajo.engine.query.QueryContext;
-import org.apache.tajo.exception.TajoException;
-import org.apache.tajo.parser.sql.SQLAnalyzer;
-import org.apache.tajo.plan.LogicalOptimizer;
-import org.apache.tajo.plan.LogicalPlan;
-import org.apache.tajo.plan.LogicalPlanner;
-import org.apache.tajo.plan.expr.AlgebraicUtil;
-import org.apache.tajo.plan.expr.EvalNode;
-import org.apache.tajo.plan.logical.LogicalNode;
-import org.apache.tajo.plan.logical.NodeType;
-import org.apache.tajo.plan.logical.ScanNode;
-import org.apache.tajo.plan.util.EvalNodeToExprConverter;
-import org.apache.tajo.plan.util.PlannerUtil;
-import org.apache.tajo.session.Session;
-import org.apache.tajo.storage.TablespaceManager;
-import org.apache.tajo.util.CommonTestingUtil;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Test;
-
-import java.util.Stack;
-
-import static org.apache.tajo.TajoConstants.DEFAULT_DATABASE_NAME;
-import static org.apache.tajo.TajoConstants.DEFAULT_TABLESPACE_NAME;
-import static org.junit.Assert.*;
-
-public class TestEvalNodeToExprConverter {
- private static TajoTestingCluster util;
- private static CatalogService catalog;
- private static SQLAnalyzer sqlAnalyzer;
- private static LogicalPlanner planner;
- private static TPCH tpch;
- private static Session session = LocalTajoTestingUtility.createDummySession();
-
- @BeforeClass
- public static void setUp() throws Exception {
- util = new TajoTestingCluster();
- util.startCatalogCluster();
- catalog = util.getCatalogService();
- catalog.createTablespace(DEFAULT_TABLESPACE_NAME, "hdfs://localhost:1234");
- catalog.createDatabase(DEFAULT_DATABASE_NAME, DEFAULT_TABLESPACE_NAME);
-
- for (FunctionDesc funcDesc : FunctionLoader.findLegacyFunctions()) {
- catalog.createFunction(funcDesc);
- }
-
- // TPC-H Schema for Complex Queries
- String [] tpchTables = {
- "part", "supplier", "partsupp", "nation", "region", "lineitem"
- };
- tpch = new TPCH();
- tpch.loadSchemas();
- tpch.loadOutSchema();
- for (String table : tpchTables) {
- TableMeta m = CatalogUtil.newTableMeta("TEXT");
- TableDesc d = CatalogUtil.newTableDesc(
- CatalogUtil.buildFQName(DEFAULT_DATABASE_NAME, table), tpch.getSchema(table), m,
- CommonTestingUtil.getTestDir());
- catalog.createTable(d);
- }
-
- sqlAnalyzer = new SQLAnalyzer();
- planner = new LogicalPlanner(catalog, TablespaceManager.getInstance());
- }
-
- @AfterClass
- public static void tearDown() throws Exception {
- util.shutdownCatalogCluster();
- }
-
- static String[] QUERIES = {
- "select * from lineitem where L_ORDERKEY > 500", //0
- "select * from region where r_name = 'EUROPE'", //1
- "select * from lineitem where L_DISCOUNT >= 0.05 and L_DISCOUNT <= 0.07 OR L_QUANTITY < 24.0 ", //2
- "select * from lineitem where L_DISCOUNT between 0.06 - 0.01 and 0.08 + 0.02 and L_ORDERKEY < 24 ", //3
- "select * from lineitem where (case when L_DISCOUNT > 0.0 then L_DISCOUNT / L_TAX else null end) > 1.2 ", //4
- "select * from part where p_brand = 'Brand#23' and p_container in ('MED BAG', 'MED BOX', 'MED PKG', 'MED PACK') " +
- "and p_size between 1 and 10", //5
- };
-
- private static QueryContext createQueryContext() {
- QueryContext qc = new QueryContext(util.getConfiguration(), session);
- qc.put(QueryVars.DEFAULT_SPACE_URI, "file:/");
- qc.put(QueryVars.DEFAULT_SPACE_ROOT_URI, "file:/");
- return qc;
- }
-
- @Test
- public final void testBinaryOperator1() throws CloneNotSupportedException, TajoException {
- QueryContext qc = createQueryContext();
-
- Expr expr = sqlAnalyzer.parse(QUERIES[0]);
-
- LogicalPlan plan = planner.createPlan(qc, expr);
- LogicalOptimizer optimizer = new LogicalOptimizer(util.getConfiguration(), catalog);
- optimizer.optimize(plan);
-
- LogicalNode node = plan.getRootBlock().getRoot();
- ScanNode scanNode = PlannerUtil.findTopNode(node, NodeType.SCAN);
-
- EvalNodeToExprConverter convertor = new EvalNodeToExprConverter(scanNode.getTableName());
- convertor.visit(null, scanNode.getQual(), new Stack<EvalNode>());
-
- Expr resultExpr = convertor.getResult();
-
- BinaryOperator binaryOperator = AlgebraicUtil.findTopExpr(resultExpr, OpType.GreaterThan);
- assertNotNull(binaryOperator);
-
- ColumnReferenceExpr column = binaryOperator.getLeft();
- assertEquals("default.lineitem", column.getQualifier());
- assertEquals("l_orderkey", column.getName());
-
- LiteralValue literalValue = binaryOperator.getRight();
- assertEquals("500", literalValue.getValue());
- assertEquals(LiteralValue.LiteralType.Unsigned_Integer, literalValue.getValueType());
- }
-
- @Test
- public final void testBinaryOperator2() throws CloneNotSupportedException, TajoException {
- QueryContext qc = createQueryContext();
-
- Expr expr = sqlAnalyzer.parse(QUERIES[1]);
-
- LogicalPlan plan = planner.createPlan(qc, expr);
- LogicalOptimizer optimizer = new LogicalOptimizer(util.getConfiguration(), catalog);
- optimizer.optimize(plan);
-
- LogicalNode node = plan.getRootBlock().getRoot();
- ScanNode scanNode = PlannerUtil.findTopNode(node, NodeType.SCAN);
-
- EvalNodeToExprConverter convertor = new EvalNodeToExprConverter(scanNode.getTableName());
- convertor.visit(null, scanNode.getQual(), new Stack<EvalNode>());
-
- Expr resultExpr = convertor.getResult();
- BinaryOperator equals = AlgebraicUtil.findTopExpr(resultExpr, OpType.Equals);
- assertNotNull(equals);
-
- ColumnReferenceExpr column = equals.getLeft();
- assertEquals("default.region", column.getQualifier());
- assertEquals("r_name", column.getName());
-
- LiteralValue literalValue = equals.getRight();
- assertEquals("EUROPE", literalValue.getValue());
- assertEquals(LiteralValue.LiteralType.String, literalValue.getValueType());
- }
-
- @Test
- public final void testBinaryOperator3() throws CloneNotSupportedException, TajoException {
- QueryContext qc = createQueryContext();
-
- Expr expr = sqlAnalyzer.parse(QUERIES[2]);
-
- LogicalPlan plan = planner.createPlan(qc, expr);
- LogicalOptimizer optimizer = new LogicalOptimizer(util.getConfiguration(), catalog);
- optimizer.optimize(plan);
-
- LogicalNode node = plan.getRootBlock().getRoot();
- ScanNode scanNode = PlannerUtil.findTopNode(node, NodeType.SCAN);
-
- EvalNodeToExprConverter convertor = new EvalNodeToExprConverter(scanNode.getTableName());
- convertor.visit(null, scanNode.getQual(), new Stack<EvalNode>());
-
- Expr resultExpr = convertor.getResult();
-
- BinaryOperator greaterThanOrEquals = AlgebraicUtil.findTopExpr(resultExpr, OpType.GreaterThanOrEquals);
- assertNotNull(greaterThanOrEquals);
-
- ColumnReferenceExpr greaterThanOrEqualsLeft = greaterThanOrEquals.getLeft();
- assertEquals("default.lineitem", greaterThanOrEqualsLeft.getQualifier());
- assertEquals("l_discount", greaterThanOrEqualsLeft.getName());
-
- LiteralValue greaterThanOrEqualsRight = greaterThanOrEquals.getRight();
- assertEquals("0.05", greaterThanOrEqualsRight.getValue());
- assertEquals(LiteralValue.LiteralType.Unsigned_Float, greaterThanOrEqualsRight.getValueType());
-
- BinaryOperator lessThanOrEquals = AlgebraicUtil.findTopExpr(resultExpr, OpType.LessThanOrEquals);
- assertNotNull(lessThanOrEquals);
-
- ColumnReferenceExpr lessThanOrEqualsLeft = lessThanOrEquals.getLeft();
- assertEquals("default.lineitem", lessThanOrEqualsLeft.getQualifier());
- assertEquals("l_discount", lessThanOrEqualsLeft.getName());
-
- LiteralValue lessThanOrEqualsRight = lessThanOrEquals.getRight();
- assertEquals("0.07", lessThanOrEqualsRight.getValue());
- assertEquals(LiteralValue.LiteralType.Unsigned_Float, lessThanOrEqualsRight.getValueType());
-
- BinaryOperator lessThan = AlgebraicUtil.findTopExpr(resultExpr, OpType.LessThan);
- assertNotNull(lessThan);
-
- ColumnReferenceExpr lessThanLeft = lessThan.getLeft();
- assertEquals("default.lineitem", lessThanLeft.getQualifier());
- assertEquals("l_quantity", lessThanLeft.getName());
-
- LiteralValue lessThanRight = lessThan.getRight();
- assertEquals("24.0", lessThanRight.getValue());
- assertEquals(LiteralValue.LiteralType.Unsigned_Float, lessThanRight.getValueType());
-
- BinaryOperator leftExpr = new BinaryOperator(OpType.And, greaterThanOrEquals, lessThanOrEquals);
-
- BinaryOperator topExpr = AlgebraicUtil.findTopExpr(resultExpr, OpType.Or);
- assertEquals(leftExpr, topExpr.getLeft());
- assertEquals(lessThan, topExpr.getRight());
- }
-
- @Test
- public final void testBetweenPredicate() throws CloneNotSupportedException, TajoException {
- QueryContext qc = createQueryContext();
-
- Expr expr = sqlAnalyzer.parse(QUERIES[3]);
-
- LogicalPlan plan = planner.createPlan(qc, expr);
- LogicalOptimizer optimizer = new LogicalOptimizer(util.getConfiguration(), catalog);
- optimizer.optimize(plan);
-
- LogicalNode node = plan.getRootBlock().getRoot();
- ScanNode scanNode = PlannerUtil.findTopNode(node, NodeType.SCAN);
-
- EvalNodeToExprConverter convertor = new EvalNodeToExprConverter(scanNode.getTableName());
- convertor.visit(null, scanNode.getQual(), new Stack<EvalNode>());
-
- Expr resultExpr = convertor.getResult();
-
- BinaryOperator binaryOperator = AlgebraicUtil.findTopExpr(resultExpr, OpType.LessThan);
- assertNotNull(binaryOperator);
- ColumnReferenceExpr column = binaryOperator.getLeft();
- assertEquals("default.lineitem", column.getQualifier());
- assertEquals("l_orderkey", column.getName());
-
- LiteralValue literalValue = binaryOperator.getRight();
- assertEquals("24", literalValue.getValue());
- assertEquals(LiteralValue.LiteralType.Unsigned_Integer, literalValue.getValueType());
-
- BetweenPredicate between = AlgebraicUtil.findTopExpr(resultExpr, OpType.Between);
- assertFalse(between.isNot());
- assertFalse(between.isSymmetric());
-
- ColumnReferenceExpr predicand = (ColumnReferenceExpr)between.predicand();
- assertEquals("default.lineitem", predicand.getQualifier());
- assertEquals("l_discount", predicand.getName());
-
- BinaryOperator begin = (BinaryOperator)between.begin();
- assertEquals(OpType.Minus, begin.getType());
- LiteralValue left = begin.getLeft();
- assertEquals("0.06", left.getValue());
- assertEquals(LiteralValue.LiteralType.Unsigned_Float, left.getValueType());
- LiteralValue right = begin.getRight();
- assertEquals("0.01", right.getValue());
- assertEquals(LiteralValue.LiteralType.Unsigned_Float, right.getValueType());
-
- BinaryOperator end = (BinaryOperator)between.end();
- assertEquals(OpType.Plus, end.getType());
- left = end.getLeft();
- assertEquals("0.08", left.getValue());
- assertEquals(LiteralValue.LiteralType.Unsigned_Float, left.getValueType());
- right = end.getRight();
- assertEquals("0.02", right.getValue());
- assertEquals(LiteralValue.LiteralType.Unsigned_Float, right.getValueType());
- }
-
- @Test
- public final void testCaseWhenPredicate() throws CloneNotSupportedException, TajoException {
- QueryContext qc = createQueryContext();
-
- Expr expr = sqlAnalyzer.parse(QUERIES[4]);
-
- LogicalPlan plan = planner.createPlan(qc, expr);
- LogicalOptimizer optimizer = new LogicalOptimizer(util.getConfiguration(), catalog);
- optimizer.optimize(plan);
-
- LogicalNode node = plan.getRootBlock().getRoot();
- ScanNode scanNode = PlannerUtil.findTopNode(node, NodeType.SCAN);
-
- EvalNodeToExprConverter convertor = new EvalNodeToExprConverter(scanNode.getTableName());
- convertor.visit(null, scanNode.getQual(), new Stack<EvalNode>());
-
- Expr resultExpr = convertor.getResult();
-
- CaseWhenPredicate caseWhen = AlgebraicUtil.findTopExpr(resultExpr, OpType.CaseWhen);
- assertNotNull(caseWhen);
-
- CaseWhenPredicate.WhenExpr[] whenExprs = new CaseWhenPredicate.WhenExpr[1];
- caseWhen.getWhens().toArray(whenExprs);
-
- BinaryOperator condition = (BinaryOperator) whenExprs[0].getCondition();
- assertEquals(OpType.GreaterThan, condition.getType());
-
- ColumnReferenceExpr conditionLeft = condition.getLeft();
- assertEquals("default.lineitem", conditionLeft.getQualifier());
- assertEquals("l_discount", conditionLeft.getName());
-
- LiteralValue conditionRight = condition.getRight();
- assertEquals("0.0", conditionRight.getValue());
- assertEquals(LiteralValue.LiteralType.Unsigned_Float, conditionRight.getValueType());
-
- BinaryOperator result = (BinaryOperator) whenExprs[0].getResult();
- assertEquals(OpType.Divide, result.getType());
- ColumnReferenceExpr resultLeft = result.getLeft();
- assertEquals("default.lineitem", resultLeft.getQualifier());
- assertEquals("l_discount", resultLeft.getName());
-
- ColumnReferenceExpr resultRight = result.getRight();
- assertEquals("default.lineitem", resultRight.getQualifier());
- assertEquals("l_tax", resultRight.getName());
-
- BinaryOperator greaterThan = AlgebraicUtil.findTopExpr(resultExpr, OpType.GreaterThan);
- assertNotNull(greaterThan);
-
- assertEquals(greaterThan.getLeft(), caseWhen);
-
- LiteralValue binaryOperatorRight = greaterThan.getRight();
- assertEquals("1.2", binaryOperatorRight.getValue());
- assertEquals(LiteralValue.LiteralType.Unsigned_Float, conditionRight.getValueType());
- }
-
- @Test
- public final void testThreeFilters() throws CloneNotSupportedException, TajoException {
- QueryContext qc = createQueryContext();
-
- Expr expr = sqlAnalyzer.parse(QUERIES[5]);
-
- LogicalPlan plan = planner.createPlan(qc, expr);
- LogicalOptimizer optimizer = new LogicalOptimizer(util.getConfiguration(), catalog);
- optimizer.optimize(plan);
-
- LogicalNode node = plan.getRootBlock().getRoot();
- ScanNode scanNode = PlannerUtil.findTopNode(node, NodeType.SCAN);
-
- EvalNodeToExprConverter convertor = new EvalNodeToExprConverter(scanNode.getTableName());
- convertor.visit(null, scanNode.getQual(), new Stack<EvalNode>());
-
- Expr resultExpr = convertor.getResult();
-
- BetweenPredicate between = AlgebraicUtil.findTopExpr(resultExpr, OpType.Between);
- assertFalse(between.isNot());
- assertFalse(between.isSymmetric());
-
- ColumnReferenceExpr predicand = (ColumnReferenceExpr)between.predicand();
- assertEquals("default.part", predicand.getQualifier());
- assertEquals("p_size", predicand.getName());
-
- LiteralValue begin = (LiteralValue)between.begin();
- assertEquals("1", begin.getValue());
- assertEquals(LiteralValue.LiteralType.Unsigned_Integer, begin.getValueType());
-
- LiteralValue end = (LiteralValue)between.end();
- assertEquals("10", end.getValue());
- assertEquals(LiteralValue.LiteralType.Unsigned_Integer, end.getValueType());
-
- BinaryOperator equals = AlgebraicUtil.findTopExpr(resultExpr, OpType.Equals);
- assertNotNull(equals);
-
- ColumnReferenceExpr equalsLeft = equals.getLeft();
- assertEquals("default.part", equalsLeft.getQualifier());
- assertEquals("p_brand", equalsLeft.getName());
-
- LiteralValue equalsRight = equals.getRight();
- assertEquals("Brand#23", equalsRight.getValue());
- assertEquals(LiteralValue.LiteralType.String, equalsRight.getValueType());
-
- InPredicate inPredicate = AlgebraicUtil.findTopExpr(resultExpr, OpType.InPredicate);
- assertNotNull(inPredicate);
-
- ValueListExpr valueList = (ValueListExpr)inPredicate.getInValue();
- assertEquals(4, valueList.getValues().length);
- for(int i = 0; i < valueList.getValues().length; i++) {
- LiteralValue literalValue = (LiteralValue) valueList.getValues()[i];
-
- if (i == 0) {
- assertEquals("MED BAG", literalValue.getValue());
- } else if (i == 1) {
- assertEquals("MED BOX", literalValue.getValue());
- } else if (i == 2) {
- assertEquals("MED PKG", literalValue.getValue());
- } else {
- assertEquals("MED PACK", literalValue.getValue());
- }
- }
- }
-}
http://git-wip-us.apache.org/repos/asf/tajo/blob/e1c2d352/tajo-core-tests/src/test/java/org/apache/tajo/engine/query/TestAlterTable.java
----------------------------------------------------------------------
diff --git a/tajo-core-tests/src/test/java/org/apache/tajo/engine/query/TestAlterTable.java b/tajo-core-tests/src/test/java/org/apache/tajo/engine/query/TestAlterTable.java
index 1c6a2f2..8339ea7 100644
--- a/tajo-core-tests/src/test/java/org/apache/tajo/engine/query/TestAlterTable.java
+++ b/tajo-core-tests/src/test/java/org/apache/tajo/engine/query/TestAlterTable.java
@@ -88,7 +88,7 @@ public class TestAlterTable extends QueryTestCaseBase {
executeDDL("alter_table_add_partition1.sql", null);
executeDDL("alter_table_add_partition2.sql", null);
- List<CatalogProtos.PartitionDescProto> partitions = catalog.getAllPartitions("TestAlterTable", "partitioned_table");
+ List<CatalogProtos.PartitionDescProto> partitions = catalog.getPartitions("TestAlterTable", "partitioned_table");
assertNotNull(partitions);
assertEquals(partitions.size(), 1);
assertEquals(partitions.get(0).getPartitionName(), "col3=1/col4=2");
@@ -106,7 +106,7 @@ public class TestAlterTable extends QueryTestCaseBase {
executeDDL("alter_table_drop_partition1.sql", null);
executeDDL("alter_table_drop_partition2.sql", null);
- partitions = catalog.getAllPartitions("TestAlterTable", "partitioned_table");
+ partitions = catalog.getPartitions("TestAlterTable", "partitioned_table");
assertNotNull(partitions);
assertEquals(partitions.size(), 0);
assertFalse(fs.exists(partitionPath));