You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@tajo.apache.org by ji...@apache.org on 2014/12/31 11:51:22 UTC
[1/8] tajo git commit: TAJO-269: Protocol buffer De/Serialization for
LogicalNode.
Repository: tajo
Updated Branches:
refs/heads/index_support df2ff2dd7 -> 8e52ed43a
http://git-wip-us.apache.org/repos/asf/tajo/blob/32be38d4/tajo-plan/src/main/java/org/apache/tajo/plan/serder/LogicalNodeDeserializer.java
----------------------------------------------------------------------
diff --git a/tajo-plan/src/main/java/org/apache/tajo/plan/serder/LogicalNodeDeserializer.java b/tajo-plan/src/main/java/org/apache/tajo/plan/serder/LogicalNodeDeserializer.java
new file mode 100644
index 0000000..5cbed7e
--- /dev/null
+++ b/tajo-plan/src/main/java/org/apache/tajo/plan/serder/LogicalNodeDeserializer.java
@@ -0,0 +1,678 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.tajo.plan.serder;
+
+import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
+import org.apache.hadoop.fs.Path;
+import org.apache.tajo.OverridableConf;
+import org.apache.tajo.algebra.JoinType;
+import org.apache.tajo.catalog.Column;
+import org.apache.tajo.catalog.Schema;
+import org.apache.tajo.catalog.SortSpec;
+import org.apache.tajo.catalog.TableDesc;
+import org.apache.tajo.catalog.partition.PartitionMethodDesc;
+import org.apache.tajo.catalog.proto.CatalogProtos;
+import org.apache.tajo.exception.UnimplementedException;
+import org.apache.tajo.plan.Target;
+import org.apache.tajo.plan.expr.AggregationFunctionCallEval;
+import org.apache.tajo.plan.expr.EvalNode;
+import org.apache.tajo.plan.expr.FieldEval;
+import org.apache.tajo.plan.expr.WindowFunctionEval;
+import org.apache.tajo.plan.logical.*;
+import org.apache.tajo.util.KeyValueSet;
+import org.apache.tajo.util.TUtil;
+
+import java.util.*;
+
+/**
+ * It deserializes a list of serialized logical nodes into a logical node tree.
+ */
+/**
+ * It deserializes a list of serialized logical nodes into a logical node tree.
+ *
+ * <p>All members are static; this class is a stateless utility and is not meant
+ * to be instantiated.
+ */
+public class LogicalNodeDeserializer {
+
+  // Utility class: every entry point is static, so prevent instantiation.
+  // (The former unused singleton field and its static initializer were removed.)
+  private LogicalNodeDeserializer() {
+  }
+
+  /**
+   * Deserialize a list of nodes into a logical node tree.
+   *
+   * @param context QueryContext
+   * @param tree LogicalNodeTree which contains a list of serialized logical nodes.
+   * @return A logical node tree
+   * @throws RuntimeException if the tree contains an unknown node type
+   */
+  public static LogicalNode deserialize(OverridableConf context, PlanProto.LogicalNodeTree tree) {
+    Map<Integer, LogicalNode> nodeMap = Maps.newHashMap();
+
+    // sort serialized logical nodes in an ascending order of their sids
+    List<PlanProto.LogicalNode> nodeList = Lists.newArrayList(tree.getNodesList());
+    Collections.sort(nodeList, new Comparator<PlanProto.LogicalNode>() {
+      @Override
+      public int compare(PlanProto.LogicalNode o1, PlanProto.LogicalNode o2) {
+        // Integer.compare avoids the int-overflow pitfall of subtraction-based comparators.
+        return Integer.compare(o1.getVisitSeq(), o2.getVisitSeq());
+      }
+    });
+
+    LogicalNode current = null;
+
+    // The sorted order is the same of a postfix traverse order.
+    // So, it sequentially transforms each serialized node into a LogicalNode instance in a postfix order of
+    // the original logical node tree.
+
+    Iterator<PlanProto.LogicalNode> it = nodeList.iterator();
+    while (it.hasNext()) {
+      PlanProto.LogicalNode protoNode = it.next();
+
+      switch (protoNode.getType()) {
+      case ROOT:
+        current = convertRoot(nodeMap, protoNode);
+        break;
+      case SET_SESSION:
+        current = convertSetSession(protoNode);
+        break;
+      case EXPRS:
+        current = convertEvalExpr(context, protoNode);
+        break;
+      case PROJECTION:
+        current = convertProjection(context, nodeMap, protoNode);
+        break;
+      case LIMIT:
+        current = convertLimit(nodeMap, protoNode);
+        break;
+      case SORT:
+        current = convertSort(nodeMap, protoNode);
+        break;
+      case WINDOW_AGG:
+        current = convertWindowAgg(context, nodeMap, protoNode);
+        break;
+      case HAVING:
+        current = convertHaving(context, nodeMap, protoNode);
+        break;
+      case GROUP_BY:
+        current = convertGroupby(context, nodeMap, protoNode);
+        break;
+      case DISTINCT_GROUP_BY:
+        current = convertDistinctGroupby(context, nodeMap, protoNode);
+        break;
+      case SELECTION:
+        current = convertFilter(context, nodeMap, protoNode);
+        break;
+      case JOIN:
+        current = convertJoin(context, nodeMap, protoNode);
+        break;
+      case TABLE_SUBQUERY:
+        current = convertTableSubQuery(context, nodeMap, protoNode);
+        break;
+      case UNION:
+        current = convertUnion(nodeMap, protoNode);
+        break;
+      case PARTITIONS_SCAN:
+        current = convertPartitionScan(context, protoNode);
+        break;
+      case SCAN:
+        current = convertScan(context, protoNode);
+        break;
+
+      case CREATE_TABLE:
+        current = convertCreateTable(nodeMap, protoNode);
+        break;
+      case INSERT:
+        current = convertInsert(nodeMap, protoNode);
+        break;
+      case DROP_TABLE:
+        current = convertDropTable(protoNode);
+        break;
+
+      case CREATE_DATABASE:
+        current = convertCreateDatabase(protoNode);
+        break;
+      case DROP_DATABASE:
+        current = convertDropDatabase(protoNode);
+        break;
+
+      case ALTER_TABLESPACE:
+        current = convertAlterTablespace(protoNode);
+        break;
+      case ALTER_TABLE:
+        current = convertAlterTable(protoNode);
+        break;
+      case TRUNCATE_TABLE:
+        current = convertTruncateTable(protoNode);
+        break;
+
+      default:
+        throw new RuntimeException("Unknown NodeType: " + protoNode.getType().name());
+      }
+
+      // Register the node under its visit sequence so later (parent) nodes can look up their children.
+      nodeMap.put(protoNode.getVisitSeq(), current);
+    }
+
+    // Postfix order guarantees the last converted node is the tree root.
+    return current;
+  }
+
+  /** Rebuilds a {@link LogicalRootNode}, wiring its child via the deserialized node map. */
+  private static LogicalRootNode convertRoot(Map<Integer, LogicalNode> nodeMap,
+                                             PlanProto.LogicalNode protoNode) {
+    PlanProto.RootNode rootProto = protoNode.getRoot();
+
+    LogicalRootNode root = new LogicalRootNode(protoNode.getNodeId());
+    root.setChild(nodeMap.get(rootProto.getChildSeq()));
+    if (protoNode.hasInSchema()) {
+      root.setInSchema(convertSchema(protoNode.getInSchema()));
+    }
+    if (protoNode.hasOutSchema()) {
+      root.setOutSchema(convertSchema(protoNode.getOutSchema()));
+    }
+
+    return root;
+  }
+
+  /** Rebuilds a SET SESSION statement node; a missing value is represented as null. */
+  private static SetSessionNode convertSetSession(PlanProto.LogicalNode protoNode) {
+    PlanProto.SetSessionNode setSessionProto = protoNode.getSetSession();
+
+    SetSessionNode setSession = new SetSessionNode(protoNode.getNodeId());
+    setSession.init(setSessionProto.getName(), setSessionProto.hasValue() ? setSessionProto.getValue() : null);
+
+    return setSession;
+  }
+
+  /** Rebuilds an expression-only node (e.g. SELECT without FROM). */
+  private static EvalExprNode convertEvalExpr(OverridableConf context, PlanProto.LogicalNode protoNode) {
+    PlanProto.EvalExprNode evalExprProto = protoNode.getExprEval();
+
+    EvalExprNode evalExpr = new EvalExprNode(protoNode.getNodeId());
+    evalExpr.setInSchema(convertSchema(protoNode.getInSchema()));
+    evalExpr.setTargets(convertTargets(context, evalExprProto.getTargetsList()));
+
+    return evalExpr;
+  }
+
+  /** Rebuilds a projection node together with its target expressions and distinct flag. */
+  private static ProjectionNode convertProjection(OverridableConf context, Map<Integer, LogicalNode> nodeMap,
+                                                  PlanProto.LogicalNode protoNode) {
+    PlanProto.ProjectionNode projectionProto = protoNode.getProjection();
+
+    ProjectionNode projectionNode = new ProjectionNode(protoNode.getNodeId());
+    projectionNode.init(projectionProto.getDistinct(), convertTargets(context, projectionProto.getTargetsList()));
+    projectionNode.setChild(nodeMap.get(projectionProto.getChildSeq()));
+    projectionNode.setInSchema(convertSchema(protoNode.getInSchema()));
+    projectionNode.setOutSchema(convertSchema(protoNode.getOutSchema()));
+
+    return projectionNode;
+  }
+
+  /** Rebuilds a LIMIT node with its fetch-first row count. */
+  private static LimitNode convertLimit(Map<Integer, LogicalNode> nodeMap, PlanProto.LogicalNode protoNode) {
+    PlanProto.LimitNode limitProto = protoNode.getLimit();
+
+    LimitNode limitNode = new LimitNode(protoNode.getNodeId());
+    limitNode.setChild(nodeMap.get(limitProto.getChildSeq()));
+    limitNode.setInSchema(convertSchema(protoNode.getInSchema()));
+    limitNode.setOutSchema(convertSchema(protoNode.getOutSchema()));
+    limitNode.setFetchFirst(limitProto.getFetchFirstNum());
+
+    return limitNode;
+  }
+
+  /** Rebuilds a SORT node with its sort specifications. */
+  private static SortNode convertSort(Map<Integer, LogicalNode> nodeMap, PlanProto.LogicalNode protoNode) {
+    PlanProto.SortNode sortProto = protoNode.getSort();
+
+    SortNode sortNode = new SortNode(protoNode.getNodeId());
+    sortNode.setChild(nodeMap.get(sortProto.getChildSeq()));
+    sortNode.setInSchema(convertSchema(protoNode.getInSchema()));
+    sortNode.setOutSchema(convertSchema(protoNode.getOutSchema()));
+    sortNode.setSortSpecs(convertSortSpecs(sortProto.getSortSpecsList()));
+
+    return sortNode;
+  }
+
+  /** Rebuilds a HAVING node; it shares the FilterNode proto representation with SELECTION. */
+  private static HavingNode convertHaving(OverridableConf context, Map<Integer, LogicalNode> nodeMap,
+                                          PlanProto.LogicalNode protoNode) {
+    PlanProto.FilterNode havingProto = protoNode.getFilter();
+
+    HavingNode having = new HavingNode(protoNode.getNodeId());
+    having.setChild(nodeMap.get(havingProto.getChildSeq()));
+    having.setQual(EvalNodeDeserializer.deserialize(context, havingProto.getQual()));
+    having.setInSchema(convertSchema(protoNode.getInSchema()));
+    having.setOutSchema(convertSchema(protoNode.getOutSchema()));
+
+    return having;
+  }
+
+  /**
+   * Rebuilds a window aggregation node. Partition keys, window functions, sort specs,
+   * and targets are all optional in the proto, so each is restored only when present.
+   */
+  private static WindowAggNode convertWindowAgg(OverridableConf context, Map<Integer, LogicalNode> nodeMap,
+                                                PlanProto.LogicalNode protoNode) {
+    PlanProto.WindowAggNode windowAggProto = protoNode.getWindowAgg();
+
+    WindowAggNode windowAgg = new WindowAggNode(protoNode.getNodeId());
+    windowAgg.setChild(nodeMap.get(windowAggProto.getChildSeq()));
+
+    if (windowAggProto.getPartitionKeysCount() > 0) {
+      windowAgg.setPartitionKeys(convertColumns(windowAggProto.getPartitionKeysList()));
+    }
+
+    if (windowAggProto.getWindowFunctionsCount() > 0) {
+      windowAgg.setWindowFunctions(convertWindowFuncEvals(context, windowAggProto.getWindowFunctionsList()));
+    }
+
+    windowAgg.setDistinct(windowAggProto.getDistinct());
+
+    if (windowAggProto.getSortSpecsCount() > 0) {
+      windowAgg.setSortSpecs(convertSortSpecs(windowAggProto.getSortSpecsList()));
+    }
+
+    if (windowAggProto.getTargetsCount() > 0) {
+      windowAgg.setTargets(convertTargets(context, windowAggProto.getTargetsList()));
+    }
+
+    windowAgg.setInSchema(convertSchema(protoNode.getInSchema()));
+    windowAgg.setOutSchema(convertSchema(protoNode.getOutSchema()));
+
+    return windowAgg;
+  }
+
+  /** Rebuilds a GROUP BY node; grouping keys, agg functions, and targets are optional. */
+  private static GroupbyNode convertGroupby(OverridableConf context, Map<Integer, LogicalNode> nodeMap,
+                                            PlanProto.LogicalNode protoNode) {
+    PlanProto.GroupbyNode groupbyProto = protoNode.getGroupby();
+
+    GroupbyNode groupby = new GroupbyNode(protoNode.getNodeId());
+    groupby.setChild(nodeMap.get(groupbyProto.getChildSeq()));
+    groupby.setDistinct(groupbyProto.getDistinct());
+
+    if (groupbyProto.getGroupingKeysCount() > 0) {
+      groupby.setGroupingColumns(convertColumns(groupbyProto.getGroupingKeysList()));
+    }
+    if (groupbyProto.getAggFunctionsCount() > 0) {
+      groupby.setAggFunctions(convertAggFuncCallEvals(context, groupbyProto.getAggFunctionsList()));
+    }
+    if (groupbyProto.getTargetsCount() > 0) {
+      groupby.setTargets(convertTargets(context, groupbyProto.getTargetsList()));
+    }
+
+    groupby.setInSchema(convertSchema(protoNode.getInSchema()));
+    groupby.setOutSchema(convertSchema(protoNode.getOutSchema()));
+
+    return groupby;
+  }
+
+  /**
+   * Rebuilds a distinct GROUP BY node, including its optional embedded group-by plan
+   * and sub-plans (each a serialized GroupbyNode) plus the result column id mapping.
+   */
+  private static DistinctGroupbyNode convertDistinctGroupby(OverridableConf context, Map<Integer, LogicalNode> nodeMap,
+                                                            PlanProto.LogicalNode protoNode) {
+    PlanProto.DistinctGroupbyNode distinctGroupbyProto = protoNode.getDistinctGroupby();
+
+    DistinctGroupbyNode distinctGroupby = new DistinctGroupbyNode(protoNode.getNodeId());
+    distinctGroupby.setChild(nodeMap.get(distinctGroupbyProto.getChildSeq()));
+
+    if (distinctGroupbyProto.hasGroupbyNode()) {
+      distinctGroupby.setGroupbyPlan(convertGroupby(context, nodeMap, distinctGroupbyProto.getGroupbyNode()));
+    }
+
+    if (distinctGroupbyProto.getSubPlansCount() > 0) {
+      List<GroupbyNode> subPlans = TUtil.newList();
+      for (int i = 0; i < distinctGroupbyProto.getSubPlansCount(); i++) {
+        subPlans.add(convertGroupby(context, nodeMap, distinctGroupbyProto.getSubPlans(i)));
+      }
+      distinctGroupby.setSubPlans(subPlans);
+    }
+
+    if (distinctGroupbyProto.getGroupingKeysCount() > 0) {
+      distinctGroupby.setGroupingColumns(convertColumns(distinctGroupbyProto.getGroupingKeysList()));
+    }
+    if (distinctGroupbyProto.getAggFunctionsCount() > 0) {
+      distinctGroupby.setAggFunctions(convertAggFuncCallEvals(context, distinctGroupbyProto.getAggFunctionsList()));
+    }
+    if (distinctGroupbyProto.getTargetsCount() > 0) {
+      distinctGroupby.setTargets(convertTargets(context, distinctGroupbyProto.getTargetsList()));
+    }
+    int [] resultColumnIds = new int[distinctGroupbyProto.getResultIdCount()];
+    for (int i = 0; i < distinctGroupbyProto.getResultIdCount(); i++) {
+      resultColumnIds[i] = distinctGroupbyProto.getResultId(i);
+    }
+    distinctGroupby.setResultColumnIds(resultColumnIds);
+
+    // TODO - in distinct groupby, output and target are not matched to each other. It does not follow the convention.
+    distinctGroupby.setInSchema(convertSchema(protoNode.getInSchema()));
+    distinctGroupby.setOutSchema(convertSchema(protoNode.getOutSchema()));
+
+    return distinctGroupby;
+  }
+
+  /** Rebuilds a JOIN node; the join qualifier and targets are optional in the proto. */
+  private static JoinNode convertJoin(OverridableConf context, Map<Integer, LogicalNode> nodeMap,
+                                      PlanProto.LogicalNode protoNode) {
+    PlanProto.JoinNode joinProto = protoNode.getJoin();
+
+    JoinNode join = new JoinNode(protoNode.getNodeId());
+    join.setLeftChild(nodeMap.get(joinProto.getLeftChildSeq()));
+    // NOTE: "RightChil" (sic) matches the misspelled field name in the .proto definition.
+    join.setRightChild(nodeMap.get(joinProto.getRightChilSeq()));
+    join.setJoinType(convertJoinType(joinProto.getJoinType()));
+    join.setInSchema(convertSchema(protoNode.getInSchema()));
+    join.setOutSchema(convertSchema(protoNode.getOutSchema()));
+    if (joinProto.hasJoinQual()) {
+      join.setJoinQual(EvalNodeDeserializer.deserialize(context, joinProto.getJoinQual()));
+    }
+    if (joinProto.getExistsTargets()) {
+      join.setTargets(convertTargets(context, joinProto.getTargetsList()));
+    }
+
+    return join;
+  }
+
+  /** Rebuilds a WHERE-clause selection node from the shared FilterNode proto. */
+  private static SelectionNode convertFilter(OverridableConf context, Map<Integer, LogicalNode> nodeMap,
+                                             PlanProto.LogicalNode protoNode) {
+    PlanProto.FilterNode filterProto = protoNode.getFilter();
+
+    SelectionNode selection = new SelectionNode(protoNode.getNodeId());
+    selection.setInSchema(convertSchema(protoNode.getInSchema()));
+    selection.setOutSchema(convertSchema(protoNode.getOutSchema()));
+    selection.setChild(nodeMap.get(filterProto.getChildSeq()));
+    selection.setQual(EvalNodeDeserializer.deserialize(context, filterProto.getQual()));
+
+    return selection;
+  }
+
+  /** Rebuilds a UNION node, wiring both children via the deserialized node map. */
+  private static UnionNode convertUnion(Map<Integer, LogicalNode> nodeMap, PlanProto.LogicalNode protoNode) {
+    PlanProto.UnionNode unionProto = protoNode.getUnion();
+
+    UnionNode union = new UnionNode(protoNode.getNodeId());
+    union.setInSchema(convertSchema(protoNode.getInSchema()));
+    union.setOutSchema(convertSchema(protoNode.getOutSchema()));
+    union.setLeftChild(nodeMap.get(unionProto.getLeftChildSeq()));
+    union.setRightChild(nodeMap.get(unionProto.getRightChildSeq()));
+
+    return union;
+  }
+
+  /** Rebuilds a plain table SCAN node. */
+  private static ScanNode convertScan(OverridableConf context, PlanProto.LogicalNode protoNode) {
+    ScanNode scan = new ScanNode(protoNode.getNodeId());
+    fillScanNode(context, protoNode, scan);
+
+    return scan;
+  }
+
+  /**
+   * Populates the fields common to {@link ScanNode} and its subclasses:
+   * table descriptor (with optional alias), optional targets and qualifier, and schemas.
+   */
+  private static void fillScanNode(OverridableConf context, PlanProto.LogicalNode protoNode, ScanNode scan) {
+    PlanProto.ScanNode scanProto = protoNode.getScan();
+    if (scanProto.hasAlias()) {
+      scan.init(new TableDesc(scanProto.getTable()), scanProto.getAlias());
+    } else {
+      scan.init(new TableDesc(scanProto.getTable()));
+    }
+
+    if (scanProto.getExistTargets()) {
+      scan.setTargets(convertTargets(context, scanProto.getTargetsList()));
+    }
+
+    if (scanProto.hasQual()) {
+      scan.setQual(EvalNodeDeserializer.deserialize(context, scanProto.getQual()));
+    }
+
+    scan.setInSchema(convertSchema(protoNode.getInSchema()));
+    scan.setOutSchema(convertSchema(protoNode.getOutSchema()));
+  }
+
+  /** Rebuilds a partitioned-table scan: a scan node plus its partition input paths. */
+  private static PartitionedTableScanNode convertPartitionScan(OverridableConf context, PlanProto.LogicalNode protoNode) {
+    PartitionedTableScanNode partitionedScan = new PartitionedTableScanNode(protoNode.getNodeId());
+    fillScanNode(context, protoNode, partitionedScan);
+
+    PlanProto.PartitionScanSpec partitionScanProto = protoNode.getPartitionScan();
+    Path [] paths = new Path[partitionScanProto.getPathsCount()];
+    for (int i = 0; i < partitionScanProto.getPathsCount(); i++) {
+      paths[i] = new Path(partitionScanProto.getPaths(i));
+    }
+    partitionedScan.setInputPaths(paths);
+    return partitionedScan;
+  }
+
+  /** Rebuilds a table subquery (derived table) node with its optional targets. */
+  private static TableSubQueryNode convertTableSubQuery(OverridableConf context,
+                                                        Map<Integer, LogicalNode> nodeMap,
+                                                        PlanProto.LogicalNode protoNode) {
+    PlanProto.TableSubQueryNode proto = protoNode.getTableSubQuery();
+
+    TableSubQueryNode tableSubQuery = new TableSubQueryNode(protoNode.getNodeId());
+    tableSubQuery.init(proto.getTableName(), nodeMap.get(proto.getChildSeq()));
+    tableSubQuery.setInSchema(convertSchema(protoNode.getInSchema()));
+    if (proto.getTargetsCount() > 0) {
+      tableSubQuery.setTargets(convertTargets(context, proto.getTargetsList()));
+    }
+
+    return tableSubQuery;
+  }
+
+  /**
+   * Rebuilds a CREATE TABLE node. Its state is split across three proto specs:
+   * the generic persistent-store part, the store-table part, and the create-table part.
+   */
+  private static CreateTableNode convertCreateTable(Map<Integer, LogicalNode> nodeMap,
+                                                    PlanProto.LogicalNode protoNode) {
+    PlanProto.PersistentStoreNode persistentStoreProto = protoNode.getPersistentStore();
+    PlanProto.StoreTableNodeSpec storeTableNodeSpec = protoNode.getStoreTable();
+    PlanProto.CreateTableNodeSpec createTableNodeSpec = protoNode.getCreateTable();
+
+    CreateTableNode createTable = new CreateTableNode(protoNode.getNodeId());
+    if (protoNode.hasInSchema()) {
+      createTable.setInSchema(convertSchema(protoNode.getInSchema()));
+    }
+    if (protoNode.hasOutSchema()) {
+      createTable.setOutSchema(convertSchema(protoNode.getOutSchema()));
+    }
+    createTable.setChild(nodeMap.get(persistentStoreProto.getChildSeq()));
+    createTable.setStorageType(persistentStoreProto.getStorageType());
+    createTable.setOptions(new KeyValueSet(persistentStoreProto.getTableProperties()));
+
+    createTable.setTableName(storeTableNodeSpec.getTableName());
+    if (storeTableNodeSpec.hasPartitionMethod()) {
+      createTable.setPartitionMethod(new PartitionMethodDesc(storeTableNodeSpec.getPartitionMethod()));
+    }
+
+    createTable.setTableSchema(convertSchema(createTableNodeSpec.getSchema()));
+    createTable.setExternal(createTableNodeSpec.getExternal());
+    // A location path is only meaningful for external tables.
+    if (createTableNodeSpec.getExternal() && createTableNodeSpec.hasPath()) {
+      createTable.setPath(new Path(createTableNodeSpec.getPath()));
+    }
+    createTable.setIfNotExists(createTableNodeSpec.getIfNotExists());
+
+    return createTable;
+  }
+
+  /**
+   * Rebuilds an INSERT node. Like CREATE TABLE, its state is split across the
+   * persistent-store, store-table, and insert-specific proto specs.
+   */
+  private static InsertNode convertInsert(Map<Integer, LogicalNode> nodeMap,
+                                          PlanProto.LogicalNode protoNode) {
+    PlanProto.PersistentStoreNode persistentStoreProto = protoNode.getPersistentStore();
+    PlanProto.StoreTableNodeSpec storeTableNodeSpec = protoNode.getStoreTable();
+    PlanProto.InsertNodeSpec insertNodeSpec = protoNode.getInsert();
+
+    InsertNode insertNode = new InsertNode(protoNode.getNodeId());
+    if (protoNode.hasInSchema()) {
+      insertNode.setInSchema(convertSchema(protoNode.getInSchema()));
+    }
+    if (protoNode.hasOutSchema()) {
+      insertNode.setOutSchema(convertSchema(protoNode.getOutSchema()));
+    }
+    insertNode.setChild(nodeMap.get(persistentStoreProto.getChildSeq()));
+    insertNode.setStorageType(persistentStoreProto.getStorageType());
+    insertNode.setOptions(new KeyValueSet(persistentStoreProto.getTableProperties()));
+
+    // Table name is absent for INSERT INTO LOCATION; path is set instead (below).
+    if (storeTableNodeSpec.hasTableName()) {
+      insertNode.setTableName(storeTableNodeSpec.getTableName());
+    }
+    if (storeTableNodeSpec.hasPartitionMethod()) {
+      insertNode.setPartitionMethod(new PartitionMethodDesc(storeTableNodeSpec.getPartitionMethod()));
+    }
+
+    insertNode.setOverwrite(insertNodeSpec.getOverwrite());
+    insertNode.setTableSchema(convertSchema(insertNodeSpec.getTableSchema()));
+    if (insertNodeSpec.hasTargetSchema()) {
+      insertNode.setTargetSchema(convertSchema(insertNodeSpec.getTargetSchema()));
+    }
+    if (insertNodeSpec.hasProjectedSchema()) {
+      insertNode.setProjectedSchema(convertSchema(insertNodeSpec.getProjectedSchema()));
+    }
+    if (insertNodeSpec.hasPath()) {
+      insertNode.setPath(new Path(insertNodeSpec.getPath()));
+    }
+
+    return insertNode;
+  }
+
+  /** Rebuilds a DROP TABLE node (table name, IF EXISTS flag, PURGE flag). */
+  private static DropTableNode convertDropTable(PlanProto.LogicalNode protoNode) {
+    DropTableNode dropTable = new DropTableNode(protoNode.getNodeId());
+
+    PlanProto.DropTableNode dropTableProto = protoNode.getDropTable();
+    dropTable.init(dropTableProto.getTableName(), dropTableProto.getIfExists(), dropTableProto.getPurge());
+
+    return dropTable;
+  }
+
+  /** Rebuilds a CREATE DATABASE node (database name, IF NOT EXISTS flag). */
+  private static CreateDatabaseNode convertCreateDatabase(PlanProto.LogicalNode protoNode) {
+    CreateDatabaseNode createDatabase = new CreateDatabaseNode(protoNode.getNodeId());
+
+    PlanProto.CreateDatabaseNode createDatabaseProto = protoNode.getCreateDatabase();
+    createDatabase.init(createDatabaseProto.getDbName(), createDatabaseProto.getIfNotExists());
+
+    return createDatabase;
+  }
+
+  /** Rebuilds a DROP DATABASE node (database name, IF EXISTS flag). */
+  private static DropDatabaseNode convertDropDatabase(PlanProto.LogicalNode protoNode) {
+    DropDatabaseNode dropDatabase = new DropDatabaseNode(protoNode.getNodeId());
+
+    PlanProto.DropDatabaseNode dropDatabaseProto = protoNode.getDropDatabase();
+    dropDatabase.init(dropDatabaseProto.getDbName(), dropDatabaseProto.getIfExists());
+
+    return dropDatabase;
+  }
+
+  /**
+   * Rebuilds an ALTER TABLESPACE node. Only the LOCATION set-type is currently supported.
+   *
+   * @throws UnimplementedException for any other set-type
+   */
+  private static AlterTablespaceNode convertAlterTablespace(PlanProto.LogicalNode protoNode) {
+    AlterTablespaceNode alterTablespace = new AlterTablespaceNode(protoNode.getNodeId());
+
+    PlanProto.AlterTablespaceNode alterTablespaceProto = protoNode.getAlterTablespace();
+    alterTablespace.setTablespaceName(alterTablespaceProto.getTableSpaceName());
+
+    switch (alterTablespaceProto.getSetType()) {
+    case LOCATION:
+      alterTablespace.setLocation(alterTablespaceProto.getSetLocation().getLocation());
+      break;
+    default:
+      throw new UnimplementedException("Unknown SET type in ALTER TABLE: " + alterTablespaceProto.getSetType().name());
+    }
+
+    return alterTablespace;
+  }
+
+  /**
+   * Rebuilds an ALTER TABLE node (RENAME TABLE, ADD COLUMN, or RENAME COLUMN).
+   *
+   * @throws UnimplementedException for any other set-type
+   */
+  private static AlterTableNode convertAlterTable(PlanProto.LogicalNode protoNode) {
+    AlterTableNode alterTable = new AlterTableNode(protoNode.getNodeId());
+
+    PlanProto.AlterTableNode alterTableProto = protoNode.getAlterTable();
+    alterTable.setTableName(alterTableProto.getTableName());
+
+    switch (alterTableProto.getSetType()) {
+    case RENAME_TABLE:
+      alterTable.setNewTableName(alterTableProto.getRenameTable().getNewName());
+      break;
+    case ADD_COLUMN:
+      alterTable.setAddNewColumn(new Column(alterTableProto.getAddColumn().getAddColumn()));
+      break;
+    case RENAME_COLUMN:
+      alterTable.setColumnName(alterTableProto.getRenameColumn().getOldName());
+      alterTable.setNewColumnName(alterTableProto.getRenameColumn().getNewName());
+      break;
+    default:
+      throw new UnimplementedException("Unknown SET type in ALTER TABLE: " + alterTableProto.getSetType().name());
+    }
+
+    return alterTable;
+  }
+
+  /** Rebuilds a TRUNCATE TABLE node with its list of table names. */
+  private static TruncateTableNode convertTruncateTable(PlanProto.LogicalNode protoNode) {
+    TruncateTableNode truncateTable = new TruncateTableNode(protoNode.getNodeId());
+
+    PlanProto.TruncateTableNode truncateTableProto = protoNode.getTruncateTableNode();
+    truncateTable.setTableNames(truncateTableProto.getTableNamesList());
+
+    return truncateTable;
+  }
+
+  /** Deserializes each eval tree and casts it to an aggregation function call. */
+  private static AggregationFunctionCallEval [] convertAggFuncCallEvals(OverridableConf context,
+                                                                        List<PlanProto.EvalNodeTree> evalTrees) {
+    AggregationFunctionCallEval [] aggFuncs = new AggregationFunctionCallEval[evalTrees.size()];
+    for (int i = 0; i < aggFuncs.length; i++) {
+      aggFuncs[i] = (AggregationFunctionCallEval) EvalNodeDeserializer.deserialize(context, evalTrees.get(i));
+    }
+    return aggFuncs;
+  }
+
+  /** Deserializes each eval tree and casts it to a window function eval. */
+  private static WindowFunctionEval[] convertWindowFuncEvals(OverridableConf context,
+                                                             List<PlanProto.EvalNodeTree> evalTrees) {
+    WindowFunctionEval[] winFuncEvals = new WindowFunctionEval[evalTrees.size()];
+    for (int i = 0; i < winFuncEvals.length; i++) {
+      winFuncEvals[i] = (WindowFunctionEval) EvalNodeDeserializer.deserialize(context, evalTrees.get(i));
+    }
+    return winFuncEvals;
+  }
+
+  /** Converts a serialized schema proto into a {@link Schema}. */
+  public static Schema convertSchema(CatalogProtos.SchemaProto proto) {
+    return new Schema(proto);
+  }
+
+  /** Converts a list of column protos into a {@link Column} array. */
+  public static Column[] convertColumns(List<CatalogProtos.ColumnProto> columnProtos) {
+    Column [] columns = new Column[columnProtos.size()];
+    for (int i = 0; i < columns.length; i++) {
+      columns[i] = new Column(columnProtos.get(i));
+    }
+    return columns;
+  }
+
+  /**
+   * Converts a list of target protos into a {@link Target} array. An aliased target
+   * keeps its expression; an unaliased one must be a plain field reference (FieldEval).
+   */
+  public static Target[] convertTargets(OverridableConf context, List<PlanProto.Target> targetsProto) {
+    Target [] targets = new Target[targetsProto.size()];
+    for (int i = 0; i < targets.length; i++) {
+      PlanProto.Target targetProto = targetsProto.get(i);
+      EvalNode evalNode = EvalNodeDeserializer.deserialize(context, targetProto.getExpr());
+      if (targetProto.hasAlias()) {
+        targets[i] = new Target(evalNode, targetProto.getAlias());
+      } else {
+        targets[i] = new Target((FieldEval) evalNode);
+      }
+    }
+    return targets;
+  }
+
+  /** Converts a list of sort-spec protos into a {@link SortSpec} array, preserving order. */
+  public static SortSpec[] convertSortSpecs(List<CatalogProtos.SortSpecProto> sortSpecProtos) {
+    SortSpec[] sortSpecs = new SortSpec[sortSpecProtos.size()];
+    int i = 0;
+    for (CatalogProtos.SortSpecProto proto : sortSpecProtos) {
+      sortSpecs[i++] = new SortSpec(proto);
+    }
+    return sortSpecs;
+  }
+
+  /**
+   * Maps a serialized join type onto the algebra {@link JoinType}.
+   *
+   * @throws RuntimeException if the proto join type is unknown
+   */
+  public static JoinType convertJoinType(PlanProto.JoinType type) {
+    switch (type) {
+    case CROSS_JOIN:
+      return JoinType.CROSS;
+    case INNER_JOIN:
+      return JoinType.INNER;
+    case LEFT_OUTER_JOIN:
+      return JoinType.LEFT_OUTER;
+    case RIGHT_OUTER_JOIN:
+      return JoinType.RIGHT_OUTER;
+    case FULL_OUTER_JOIN:
+      return JoinType.FULL_OUTER;
+    case LEFT_SEMI_JOIN:
+      return JoinType.LEFT_SEMI;
+    case RIGHT_SEMI_JOIN:
+      return JoinType.RIGHT_SEMI;
+    case LEFT_ANTI_JOIN:
+      return JoinType.LEFT_ANTI;
+    case RIGHT_ANTI_JOIN:
+      return JoinType.RIGHT_ANTI;
+    case UNION_JOIN:
+      return JoinType.UNION;
+    default:
+      throw new RuntimeException("Unknown JoinType: " + type.name());
+    }
+  }
+}
http://git-wip-us.apache.org/repos/asf/tajo/blob/32be38d4/tajo-plan/src/main/java/org/apache/tajo/plan/serder/LogicalNodeSerializer.java
----------------------------------------------------------------------
diff --git a/tajo-plan/src/main/java/org/apache/tajo/plan/serder/LogicalNodeSerializer.java b/tajo-plan/src/main/java/org/apache/tajo/plan/serder/LogicalNodeSerializer.java
new file mode 100644
index 0000000..39a13ba
--- /dev/null
+++ b/tajo-plan/src/main/java/org/apache/tajo/plan/serder/LogicalNodeSerializer.java
@@ -0,0 +1,724 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.tajo.plan.serder;
+
+import com.google.common.collect.Maps;
+import org.apache.hadoop.fs.Path;
+import org.apache.tajo.algebra.JoinType;
+import org.apache.tajo.catalog.proto.CatalogProtos;
+import org.apache.tajo.exception.UnimplementedException;
+import org.apache.tajo.plan.LogicalPlan;
+import org.apache.tajo.plan.PlanningException;
+import org.apache.tajo.plan.Target;
+import org.apache.tajo.plan.logical.*;
+import org.apache.tajo.plan.serder.PlanProto.AlterTableNode.AddColumn;
+import org.apache.tajo.plan.serder.PlanProto.AlterTableNode.RenameColumn;
+import org.apache.tajo.plan.serder.PlanProto.AlterTableNode.RenameTable;
+import org.apache.tajo.plan.serder.PlanProto.AlterTablespaceNode.SetLocation;
+import org.apache.tajo.plan.serder.PlanProto.LogicalNodeTree;
+import org.apache.tajo.plan.visitor.BasicLogicalPlanVisitor;
+import org.apache.tajo.util.ProtoUtil;
+import org.apache.tajo.util.TUtil;
+
+import java.util.List;
+import java.util.Map;
+import java.util.Stack;
+
+/**
+ * It serializes a logical plan into a protobuf-based serialized bytes.
+ *
+ * In detail, it traverses all logical nodes in postfix order.
+ * For each visited node, it serializes the node and adds the serialized bytes to a list.
+ * As a result, the list contains the serialized nodes in postfix order.
+ *
+ * @see org.apache.tajo.plan.serder.LogicalNodeDeserializer
+ */
+public class LogicalNodeSerializer extends BasicLogicalPlanVisitor<LogicalNodeSerializer.SerializeContext,
+ LogicalNode> {
+
+ // Stateless singleton; all per-call state lives in SerializeContext.
+ // Inline initializer replaces the redundant static-init block.
+ private static final LogicalNodeSerializer instance = new LogicalNodeSerializer();
+
+ /**
+ * Serializes a logical plan into protobuf-based serialized bytes.
+ *
+ * @param node root LogicalNode of the (sub)plan to be serialized
+ * @return the serialized node tree, children stored before parents
+ */
+ public static LogicalNodeTree serialize(LogicalNode node) {
+ SerializeContext context = new SerializeContext();
+ try {
+ instance.visit(context, null, null, node, new Stack<LogicalNode>());
+ } catch (PlanningException e) {
+ // Serialization of an already-built plan is not expected to fail; surface as unchecked.
+ throw new RuntimeException(e);
+ }
+ return context.treeBuilder.build();
+ }
+
+ /**
+ * Creates a proto builder pre-filled with the fields common to every node:
+ * visit-sequence id (assigned here if not yet registered), PID, node type,
+ * and in/out schemas when present.
+ */
+ private static PlanProto.LogicalNode.Builder createNodeBuilder(SerializeContext context, LogicalNode node) {
+ int selfId;
+ if (context.idMap.containsKey(node)) {
+ selfId = context.idMap.get(node);
+ } else {
+ selfId = context.seqId++;
+ context.idMap.put(node, selfId);
+ }
+
+ PlanProto.LogicalNode.Builder nodeBuilder = PlanProto.LogicalNode.newBuilder();
+ nodeBuilder.setVisitSeq(selfId);
+ nodeBuilder.setNodeId(node.getPID());
+ nodeBuilder.setType(convertType(node.getType()));
+
+ // some DDL statements like DropTable or DropDatabase do not have in/out schemas
+ if (node.getInSchema() != null) {
+ nodeBuilder.setInSchema(node.getInSchema().getProto());
+ }
+ if (node.getOutSchema() != null) {
+ nodeBuilder.setOutSchema(node.getOutSchema().getProto());
+ }
+ return nodeBuilder;
+ }
+
+ /** Per-serialization state: id sequence, node-to-id map, and the tree being built. */
+ public static class SerializeContext {
+ private int seqId = 0; // next visit-sequence id to hand out
+ private Map<LogicalNode, Integer> idMap = Maps.newHashMap(); // node -> assigned visit id
+ private LogicalNodeTree.Builder treeBuilder = LogicalNodeTree.newBuilder();
+ }
+
+ /** Serializes the plan root; its single child was already serialized by the super call. */
+ public LogicalNode visitRoot(SerializeContext context, LogicalPlan plan, LogicalPlan.QueryBlock block,
+ LogicalRootNode root, Stack<LogicalNode> stack) throws PlanningException {
+ super.visitRoot(context, plan, block, root, stack);
+
+ int [] childIds = registerGetChildIds(context, root);
+
+ PlanProto.RootNode.Builder rootBuilder = PlanProto.RootNode.newBuilder();
+ rootBuilder.setChildSeq(childIds[0]);
+
+ PlanProto.LogicalNode.Builder nodeBuilder = createNodeBuilder(context, root);
+ nodeBuilder.setRoot(rootBuilder);
+ context.treeBuilder.addNodes(nodeBuilder);
+
+ return root;
+ }
+
+ /** Serializes a SET SESSION statement (variable name plus optional value). */
+ @Override
+ public LogicalNode visitSetSession(SerializeContext context, LogicalPlan plan, LogicalPlan.QueryBlock block,
+ SetSessionNode node, Stack<LogicalNode> stack) throws PlanningException {
+ super.visitSetSession(context, plan, block, node, stack);
+
+ PlanProto.SetSessionNode.Builder builder = PlanProto.SetSessionNode.newBuilder();
+ builder.setName(node.getName());
+ if (node.hasValue()) {
+ builder.setValue(node.getValue());
+ }
+
+ PlanProto.LogicalNode.Builder nodeBuilder = createNodeBuilder(context, node);
+ nodeBuilder.setSetSession(builder);
+ context.treeBuilder.addNodes(nodeBuilder);
+
+ return node;
+ }
+
+ /**
+ * Serializes an expression-only node (e.g. SELECT without FROM).
+ * No super call is made here, so no children are visited - presumably
+ * EvalExprNode is always a leaf; verify against its definition.
+ */
+ public LogicalNode visitEvalExpr(SerializeContext context, LogicalPlan plan, LogicalPlan.QueryBlock block,
+ EvalExprNode exprEval, Stack<LogicalNode> stack) throws PlanningException {
+ PlanProto.EvalExprNode.Builder exprEvalBuilder = PlanProto.EvalExprNode.newBuilder();
+ exprEvalBuilder.addAllTargets(
+ ProtoUtil.<PlanProto.Target>toProtoObjects(exprEval.getTargets()));
+
+ PlanProto.LogicalNode.Builder nodeBuilder = createNodeBuilder(context, exprEval);
+ nodeBuilder.setExprEval(exprEvalBuilder);
+ context.treeBuilder.addNodes(nodeBuilder);
+
+ return exprEval;
+ }
+
+ /** Serializes a ProjectionNode: child link, target list, and the distinct flag. */
+ public LogicalNode visitProjection(SerializeContext context, LogicalPlan plan, LogicalPlan.QueryBlock block,
+ ProjectionNode projection, Stack<LogicalNode> stack) throws PlanningException {
+ super.visitProjection(context, plan, block, projection, stack);
+
+ int [] childIds = registerGetChildIds(context, projection);
+
+ PlanProto.ProjectionNode.Builder projectionBuilder = PlanProto.ProjectionNode.newBuilder();
+ projectionBuilder.setChildSeq(childIds[0]);
+ projectionBuilder.addAllTargets(
+ ProtoUtil.<PlanProto.Target>toProtoObjects(projection.getTargets()));
+ projectionBuilder.setDistinct(projection.isDistinct());
+
+ PlanProto.LogicalNode.Builder nodeBuilder = createNodeBuilder(context, projection);
+ nodeBuilder.setProjection(projectionBuilder);
+ context.treeBuilder.addNodes(nodeBuilder);
+
+ return projection;
+ }
+
+ /** Serializes a LIMIT node (child link and fetch-first row count). */
+ @Override
+ public LogicalNode visitLimit(SerializeContext context, LogicalPlan plan, LogicalPlan.QueryBlock block,
+ LimitNode limit, Stack<LogicalNode> stack) throws PlanningException {
+ super.visitLimit(context, plan, block, limit, stack);
+
+ int [] childIds = registerGetChildIds(context, limit);
+
+ PlanProto.LimitNode.Builder limitBuilder = PlanProto.LimitNode.newBuilder();
+ limitBuilder.setChildSeq(childIds[0]);
+ limitBuilder.setFetchFirstNum(limit.getFetchFirstNum());
+
+ PlanProto.LogicalNode.Builder nodeBuilder = createNodeBuilder(context, limit);
+ nodeBuilder.setLimit(limitBuilder);
+ context.treeBuilder.addNodes(nodeBuilder);
+
+ return limit;
+ }
+
+ /**
+ * Serializes a window aggregation node: child link plus the optional
+ * partition keys, window functions, sort specs, and targets.
+ */
+ public LogicalNode visitWindowAgg(SerializeContext context, LogicalPlan plan, LogicalPlan.QueryBlock block,
+ WindowAggNode windowAgg, Stack<LogicalNode> stack) throws PlanningException {
+ super.visitWindowAgg(context, plan, block, windowAgg, stack);
+
+ int [] childIds = registerGetChildIds(context, windowAgg);
+
+ PlanProto.WindowAggNode.Builder windowAggBuilder = PlanProto.WindowAggNode.newBuilder();
+ windowAggBuilder.setChildSeq(childIds[0]);
+
+ if (windowAgg.hasPartitionKeys()) {
+ windowAggBuilder.addAllPartitionKeys(
+ ProtoUtil.<CatalogProtos.ColumnProto>toProtoObjects(windowAgg.getPartitionKeys()));
+ }
+
+ if (windowAgg.hasAggFunctions()) {
+ windowAggBuilder.addAllWindowFunctions(
+ ProtoUtil.<PlanProto.EvalNodeTree>toProtoObjects(windowAgg.getWindowFunctions()));
+ }
+ windowAggBuilder.setDistinct(windowAgg.isDistinct());
+
+ if (windowAgg.hasSortSpecs()) {
+ windowAggBuilder.addAllSortSpecs(
+ ProtoUtil.<CatalogProtos.SortSpecProto>toProtoObjects(windowAgg.getSortSpecs()));
+ }
+ if (windowAgg.hasTargets()) {
+ windowAggBuilder.addAllTargets(
+ ProtoUtil.<PlanProto.Target>toProtoObjects(windowAgg.getTargets()));
+ }
+
+ PlanProto.LogicalNode.Builder nodeBuilder = createNodeBuilder(context, windowAgg);
+ nodeBuilder.setWindowAgg(windowAggBuilder);
+ context.treeBuilder.addNodes(nodeBuilder);
+
+ return windowAgg;
+ }
+
+ /** Serializes a SortNode together with its sort keys, preserving key order. */
+ @Override
+ public LogicalNode visitSort(SerializeContext context, LogicalPlan plan, LogicalPlan.QueryBlock block,
+ SortNode sort, Stack<LogicalNode> stack) throws PlanningException {
+ super.visitSort(context, plan, block, sort, stack);
+
+ int [] childIds = registerGetChildIds(context, sort);
+
+ PlanProto.SortNode.Builder sortBuilder = PlanProto.SortNode.newBuilder();
+ sortBuilder.setChildSeq(childIds[0]);
+ for (int i = 0; i < sort.getSortKeys().length; i++) {
+ sortBuilder.addSortSpecs(sort.getSortKeys()[i].getProto());
+ }
+
+ PlanProto.LogicalNode.Builder nodeBuilder = createNodeBuilder(context, sort);
+ nodeBuilder.setSort(sortBuilder);
+ context.treeBuilder.addNodes(nodeBuilder);
+
+ return sort;
+ }
+
+ /**
+ * Serializes a HAVING node. Note that HAVING is encoded with the generic
+ * FilterNode proto (setFilter), so the node type field distinguishes it on
+ * the deserialization side.
+ */
+ @Override
+ public LogicalNode visitHaving(SerializeContext context, LogicalPlan plan, LogicalPlan.QueryBlock block,
+ HavingNode having, Stack<LogicalNode> stack) throws PlanningException {
+ super.visitHaving(context, plan, block, having, stack);
+
+ int [] childIds = registerGetChildIds(context, having);
+
+ PlanProto.FilterNode.Builder filterBuilder = PlanProto.FilterNode.newBuilder();
+ filterBuilder.setChildSeq(childIds[0]);
+ filterBuilder.setQual(EvalNodeSerializer.serialize(having.getQual()));
+
+ PlanProto.LogicalNode.Builder nodeBuilder = createNodeBuilder(context, having);
+ nodeBuilder.setFilter(filterBuilder);
+ context.treeBuilder.addNodes(nodeBuilder);
+
+ return having;
+ }
+
+ /** Serializes a GroupbyNode; the proto body is built by {@code buildGroupby}. */
+ @Override
+ public LogicalNode visitGroupBy(SerializeContext context, LogicalPlan plan, LogicalPlan.QueryBlock block,
+ GroupbyNode node, Stack<LogicalNode> stack) throws PlanningException {
+ // Fix: pass the caller's stack instead of a fresh Stack, consistent with all
+ // other visit methods, so descendants observe the correct ancestor stack.
+ super.visitGroupBy(context, plan, block, node, stack);
+
+ PlanProto.LogicalNode.Builder nodeBuilder = buildGroupby(context, node);
+ context.treeBuilder.addNodes(nodeBuilder);
+ return node;
+ }
+
+ /**
+ * Builds (but does not register) the proto for a GroupbyNode; shared by the
+ * plain group-by visitor and the distinct group-by serialization.
+ */
+ private PlanProto.LogicalNode.Builder buildGroupby(SerializeContext context, GroupbyNode node)
+ throws PlanningException {
+ int [] childIds = registerGetChildIds(context, node);
+
+ PlanProto.GroupbyNode.Builder groupbyBuilder = PlanProto.GroupbyNode.newBuilder();
+ groupbyBuilder.setChildSeq(childIds[0]);
+ groupbyBuilder.setDistinct(node.isDistinct());
+
+ if (node.groupingKeyNum() > 0) {
+ groupbyBuilder.addAllGroupingKeys(
+ ProtoUtil.<CatalogProtos.ColumnProto>toProtoObjects(node.getGroupingColumns()));
+ }
+ if (node.hasAggFunctions()) {
+ groupbyBuilder.addAllAggFunctions(
+ ProtoUtil.<PlanProto.EvalNodeTree>toProtoObjects(node.getAggFunctions()));
+ }
+ if (node.hasTargets()) {
+ groupbyBuilder.addAllTargets(ProtoUtil.<PlanProto.Target>toProtoObjects(node.getTargets()));
+ }
+
+ PlanProto.LogicalNode.Builder nodeBuilder = createNodeBuilder(context, node);
+ nodeBuilder.setGroupby(groupbyBuilder);
+
+ return nodeBuilder;
+ }
+
+ /**
+ * Serializes a DistinctGroupbyNode: the optional base group-by plan, each
+ * per-distinct-column sub-plan, grouping keys, agg functions, targets, and
+ * the result column id mapping.
+ */
+ @Override
+ public LogicalNode visitDistinctGroupby(SerializeContext context, LogicalPlan plan, LogicalPlan.QueryBlock block,
+ DistinctGroupbyNode node, Stack<LogicalNode> stack) throws PlanningException {
+ // Fix: pass the caller's stack instead of a fresh Stack, consistent with all
+ // other visit methods, so descendants observe the correct ancestor stack.
+ super.visitDistinctGroupby(context, plan, block, node, stack);
+
+ int [] childIds = registerGetChildIds(context, node);
+
+ PlanProto.DistinctGroupbyNode.Builder distGroupbyBuilder = PlanProto.DistinctGroupbyNode.newBuilder();
+ distGroupbyBuilder.setChildSeq(childIds[0]);
+ if (node.getGroupbyPlan() != null) {
+ distGroupbyBuilder.setGroupbyNode(buildGroupby(context, node.getGroupbyPlan()));
+ }
+
+ for (GroupbyNode subPlan : node.getSubPlans()) {
+ distGroupbyBuilder.addSubPlans(buildGroupby(context, subPlan));
+ }
+
+ if (node.getGroupingColumns().length > 0) {
+ distGroupbyBuilder.addAllGroupingKeys(
+ ProtoUtil.<CatalogProtos.ColumnProto>toProtoObjects(node.getGroupingColumns()));
+ }
+ if (node.getAggFunctions().length > 0) {
+ distGroupbyBuilder.addAllAggFunctions(
+ ProtoUtil.<PlanProto.EvalNodeTree>toProtoObjects(node.getAggFunctions()));
+ }
+ if (node.hasTargets()) {
+ distGroupbyBuilder.addAllTargets(ProtoUtil.<PlanProto.Target>toProtoObjects(node.getTargets()));
+ }
+ for (int cid : node.getResultColumnIds()) {
+ distGroupbyBuilder.addResultId(cid);
+ }
+
+ PlanProto.LogicalNode.Builder nodeBuilder = createNodeBuilder(context, node);
+ nodeBuilder.setDistinctGroupby(distGroupbyBuilder);
+ context.treeBuilder.addNodes(nodeBuilder);
+
+ return node;
+ }
+
+ /** Serializes a WHERE-clause SelectionNode as a FilterNode proto. */
+ @Override
+ public LogicalNode visitFilter(SerializeContext context, LogicalPlan plan, LogicalPlan.QueryBlock block,
+ SelectionNode filter, Stack<LogicalNode> stack) throws PlanningException {
+ super.visitFilter(context, plan, block, filter, stack);
+
+ int [] childIds = registerGetChildIds(context, filter);
+
+ PlanProto.FilterNode.Builder filterBuilder = PlanProto.FilterNode.newBuilder();
+ filterBuilder.setChildSeq(childIds[0]);
+ filterBuilder.setQual(EvalNodeSerializer.serialize(filter.getQual()));
+
+ PlanProto.LogicalNode.Builder nodeBuilder = createNodeBuilder(context, filter);
+ nodeBuilder.setFilter(filterBuilder);
+ context.treeBuilder.addNodes(nodeBuilder);
+
+ return filter;
+ }
+
+ /**
+ * Serializes a JoinNode: join type, both child links, the optional join
+ * predicate, and targets. An explicit existsTargets flag is written so the
+ * deserializer can distinguish "no targets" from "empty targets".
+ */
+ public LogicalNode visitJoin(SerializeContext context, LogicalPlan plan, LogicalPlan.QueryBlock block, JoinNode join,
+ Stack<LogicalNode> stack) throws PlanningException {
+ super.visitJoin(context, plan, block, join, stack);
+
+ int [] childIds = registerGetChildIds(context, join);
+
+ // building itself
+ PlanProto.JoinNode.Builder joinBuilder = PlanProto.JoinNode.newBuilder();
+ joinBuilder.setJoinType(convertJoinType(join.getJoinType()));
+ joinBuilder.setLeftChildSeq(childIds[0]);
+ // NOTE(review): "RightChilSeq" reflects a typo in the .proto field name;
+ // fixing it requires changing the proto definition, not this call site.
+ joinBuilder.setRightChilSeq(childIds[1]);
+ if (join.hasJoinQual()) {
+ joinBuilder.setJoinQual(EvalNodeSerializer.serialize(join.getJoinQual()));
+ }
+
+ if (join.hasTargets()) {
+ joinBuilder.setExistsTargets(true);
+ joinBuilder.addAllTargets(ProtoUtil.<PlanProto.Target>toProtoObjects(join.getTargets()));
+ } else {
+ joinBuilder.setExistsTargets(false);
+ }
+
+ PlanProto.LogicalNode.Builder nodeBuilder = createNodeBuilder(context, join);
+ nodeBuilder.setJoin(joinBuilder);
+ context.treeBuilder.addNodes(nodeBuilder);
+
+ return join;
+ }
+
+ /**
+ * Serializes a UnionNode with its two child links.
+ * NOTE(review): setAll(true) is hardcoded - presumably every UnionNode at
+ * this layer is UNION ALL; confirm against UnionNode's definition, otherwise
+ * this should come from the node itself.
+ */
+ @Override
+ public LogicalNode visitUnion(SerializeContext context, LogicalPlan plan, LogicalPlan.QueryBlock block, UnionNode node,
+ Stack<LogicalNode> stack) throws PlanningException {
+ super.visitUnion(context, plan, block, node, stack);
+
+ int [] childIds = registerGetChildIds(context, node);
+
+ PlanProto.UnionNode.Builder unionBuilder = PlanProto.UnionNode.newBuilder();
+ unionBuilder.setAll(true);
+ unionBuilder.setLeftChildSeq(childIds[0]);
+ unionBuilder.setRightChildSeq(childIds[1]);
+
+ PlanProto.LogicalNode.Builder nodeBuilder = createNodeBuilder(context, node);
+ nodeBuilder.setUnion(unionBuilder);
+ context.treeBuilder.addNodes(nodeBuilder);
+
+ return node;
+ }
+
+ /** Serializes a leaf ScanNode; no super call since scans have no children. */
+ @Override
+ public LogicalNode visitScan(SerializeContext context, LogicalPlan plan, LogicalPlan.QueryBlock block,
+ ScanNode scan, Stack<LogicalNode> stack) throws PlanningException {
+
+ PlanProto.ScanNode.Builder scanBuilder = buildScanNode(scan);
+
+ PlanProto.LogicalNode.Builder nodeBuilder = createNodeBuilder(context, scan);
+ nodeBuilder.setScan(scanBuilder);
+ context.treeBuilder.addNodes(nodeBuilder);
+
+ return scan;
+ }
+
+ /**
+ * Builds the common ScanNode proto (table, alias, targets, qual); shared by
+ * plain scans and partitioned-table scans.
+ */
+ public PlanProto.ScanNode.Builder buildScanNode(ScanNode scan) {
+ PlanProto.ScanNode.Builder scanBuilder = PlanProto.ScanNode.newBuilder();
+ scanBuilder.setTable(scan.getTableDesc().getProto());
+ if (scan.hasAlias()) {
+ scanBuilder.setAlias(scan.getAlias());
+ }
+
+ // existTargets distinguishes "no target list" from an empty target list
+ if (scan.hasTargets()) {
+ scanBuilder.setExistTargets(true);
+ scanBuilder.addAllTargets(ProtoUtil.<PlanProto.Target>toProtoObjects(scan.getTargets()));
+ } else {
+ scanBuilder.setExistTargets(false);
+ }
+
+ if (scan.hasQual()) {
+ scanBuilder.setQual(EvalNodeSerializer.serialize(scan.getQual()));
+ }
+ return scanBuilder;
+ }
+
+ /**
+ * Serializes a partitioned-table scan: the common scan proto plus the list of
+ * pruned partition input paths (paths are stored as strings).
+ */
+ @Override
+ public LogicalNode visitPartitionedTableScan(SerializeContext context, LogicalPlan plan, LogicalPlan.QueryBlock block,
+ PartitionedTableScanNode node, Stack<LogicalNode> stack)
+ throws PlanningException {
+
+ PlanProto.ScanNode.Builder scanBuilder = buildScanNode(node);
+
+ PlanProto.PartitionScanSpec.Builder partitionScan = PlanProto.PartitionScanSpec.newBuilder();
+ List<String> pathStrs = TUtil.newList();
+ // input paths may be absent before partition pruning has run
+ if (node.getInputPaths() != null) {
+ for (Path p : node.getInputPaths()) {
+ pathStrs.add(p.toString());
+ }
+ partitionScan.addAllPaths(pathStrs);
+ }
+
+ PlanProto.LogicalNode.Builder nodeBuilder = createNodeBuilder(context, node);
+ nodeBuilder.setScan(scanBuilder);
+ nodeBuilder.setPartitionScan(partitionScan);
+ context.treeBuilder.addNodes(nodeBuilder);
+
+ return node;
+ }
+
+ /** Serializes a table subquery (derived table): child link, table name, targets. */
+ public LogicalNode visitTableSubQuery(SerializeContext context, LogicalPlan plan, LogicalPlan.QueryBlock block,
+ TableSubQueryNode node, Stack<LogicalNode> stack) throws PlanningException {
+ super.visitTableSubQuery(context, plan, block, node, stack);
+
+ int [] childIds = registerGetChildIds(context, node);
+
+ PlanProto.TableSubQueryNode.Builder builder = PlanProto.TableSubQueryNode.newBuilder();
+ builder.setChildSeq(childIds[0]);
+
+ builder.setTableName(node.getTableName());
+
+ if (node.hasTargets()) {
+ builder.addAllTargets(ProtoUtil.<PlanProto.Target>toProtoObjects(node.getTargets()));
+ }
+
+ PlanProto.LogicalNode.Builder nodeBuilder = createNodeBuilder(context, node);
+ nodeBuilder.setTableSubQuery(builder);
+ context.treeBuilder.addNodes(nodeBuilder);
+
+ return node;
+ }
+
+ /**
+ * Serializes CREATE TABLE [AS]: the shared persistent-store and store-table
+ * specs plus the create-specific schema/external/path/if-not-exists fields.
+ */
+ public LogicalNode visitCreateTable(SerializeContext context, LogicalPlan plan, LogicalPlan.QueryBlock block,
+ CreateTableNode node, Stack<LogicalNode> stack) throws PlanningException {
+ super.visitCreateTable(context, plan, block, node, stack);
+
+ int [] childIds = registerGetChildIds(context, node);
+
+ PlanProto.PersistentStoreNode.Builder persistentStoreBuilder = buildPersistentStoreBuilder(node, childIds);
+ PlanProto.StoreTableNodeSpec.Builder storeTableBuilder = buildStoreTableNodeSpec(node);
+
+ PlanProto.CreateTableNodeSpec.Builder createTableBuilder = PlanProto.CreateTableNodeSpec.newBuilder();
+ createTableBuilder.setSchema(node.getTableSchema().getProto());
+ createTableBuilder.setExternal(node.isExternal());
+ // a path is only meaningful for external tables
+ if (node.isExternal() && node.hasPath()) {
+ createTableBuilder.setPath(node.getPath().toString());
+ }
+ createTableBuilder.setIfNotExists(node.isIfNotExists());
+
+ PlanProto.LogicalNode.Builder nodeBuilder = createNodeBuilder(context, node);
+ nodeBuilder.setPersistentStore(persistentStoreBuilder);
+ nodeBuilder.setStoreTable(storeTableBuilder);
+ nodeBuilder.setCreateTable(createTableBuilder);
+ context.treeBuilder.addNodes(nodeBuilder);
+
+ return node;
+ }
+
+ /** Serializes DROP TABLE (leaf DDL node: table name, IF EXISTS, PURGE flags). */
+ @Override
+ public LogicalNode visitDropTable(SerializeContext context, LogicalPlan plan, LogicalPlan.QueryBlock block,
+ DropTableNode node, Stack<LogicalNode> stack) {
+ PlanProto.DropTableNode.Builder dropTableBuilder = PlanProto.DropTableNode.newBuilder();
+ dropTableBuilder.setTableName(node.getTableName());
+ dropTableBuilder.setIfExists(node.isIfExists());
+ dropTableBuilder.setPurge(node.isPurge());
+
+ PlanProto.LogicalNode.Builder nodeBuilder = createNodeBuilder(context, node);
+ nodeBuilder.setDropTable(dropTableBuilder);
+ context.treeBuilder.addNodes(nodeBuilder);
+
+ return node;
+ }
+
+ /**
+ * Serializes ALTER TABLESPACE. Only SET LOCATION is supported so far; any
+ * other set-type is rejected explicitly rather than silently dropped.
+ */
+ @Override
+ public LogicalNode visitAlterTablespace(SerializeContext context, LogicalPlan plan, LogicalPlan.QueryBlock block,
+ AlterTablespaceNode node, Stack<LogicalNode> stack) throws PlanningException {
+ PlanProto.AlterTablespaceNode.Builder alterTablespaceBuilder = PlanProto.AlterTablespaceNode.newBuilder();
+ alterTablespaceBuilder.setTableSpaceName(node.getTablespaceName());
+
+ switch (node.getSetType()) {
+ case LOCATION:
+ alterTablespaceBuilder.setSetType(PlanProto.AlterTablespaceNode.Type.LOCATION);
+ alterTablespaceBuilder.setSetLocation(SetLocation.newBuilder().setLocation(node.getLocation()));
+ break;
+
+ default:
+ throw new UnimplementedException("Unknown SET type in ALTER TABLESPACE: " + node.getSetType().name());
+ }
+
+ PlanProto.LogicalNode.Builder nodeBuilder = createNodeBuilder(context, node);
+ nodeBuilder.setAlterTablespace(alterTablespaceBuilder);
+ context.treeBuilder.addNodes(nodeBuilder);
+
+ return node;
+ }
+
+ /**
+ * Serializes ALTER TABLE: one of RENAME TABLE, ADD COLUMN, or RENAME COLUMN,
+ * encoded as a set-type discriminator plus the matching sub-message.
+ */
+ @Override
+ public LogicalNode visitAlterTable(SerializeContext context, LogicalPlan plan, LogicalPlan.QueryBlock block,
+ AlterTableNode node, Stack<LogicalNode> stack) {
+ PlanProto.AlterTableNode.Builder alterTableBuilder = PlanProto.AlterTableNode.newBuilder();
+ alterTableBuilder.setTableName(node.getTableName());
+
+ switch (node.getAlterTableOpType()) {
+ case RENAME_TABLE:
+ alterTableBuilder.setSetType(PlanProto.AlterTableNode.Type.RENAME_TABLE);
+ alterTableBuilder.setRenameTable(RenameTable.newBuilder().setNewName(node.getNewTableName()));
+ break;
+ case ADD_COLUMN:
+ alterTableBuilder.setSetType(PlanProto.AlterTableNode.Type.ADD_COLUMN);
+ alterTableBuilder.setAddColumn(AddColumn.newBuilder().setAddColumn(node.getAddNewColumn().getProto()));
+ break;
+ case RENAME_COLUMN:
+ alterTableBuilder.setSetType(PlanProto.AlterTableNode.Type.RENAME_COLUMN);
+ alterTableBuilder.setRenameColumn(RenameColumn.newBuilder()
+ .setOldName(node.getColumnName())
+ .setNewName(node.getNewColumnName()));
+ break;
+ default:
+ throw new UnimplementedException("Unknown SET type in ALTER TABLE: " + node.getAlterTableOpType().name());
+ }
+
+ PlanProto.LogicalNode.Builder nodeBuilder = createNodeBuilder(context, node);
+ nodeBuilder.setAlterTable(alterTableBuilder);
+ context.treeBuilder.addNodes(nodeBuilder);
+
+ return node;
+ }
+
+ /** Serializes TRUNCATE TABLE (leaf DDL node carrying the target table names). */
+ @Override
+ public LogicalNode visitTruncateTable(SerializeContext context, LogicalPlan plan, LogicalPlan.QueryBlock block,
+ TruncateTableNode node, Stack<LogicalNode> stack) throws PlanningException {
+ PlanProto.TruncateTableNode.Builder truncateTableBuilder = PlanProto.TruncateTableNode.newBuilder();
+ truncateTableBuilder.addAllTableNames(node.getTableNames());
+
+ PlanProto.LogicalNode.Builder nodeBuilder = createNodeBuilder(context, node);
+ nodeBuilder.setTruncateTableNode(truncateTableBuilder);
+ context.treeBuilder.addNodes(nodeBuilder);
+
+ return node;
+ }
+
+ /**
+ * Serializes INSERT [OVERWRITE] INTO: shared persistent-store and store-table
+ * specs plus the insert-specific schemas and the optional target path
+ * (INSERT INTO LOCATION).
+ */
+ public LogicalNode visitInsert(SerializeContext context, LogicalPlan plan, LogicalPlan.QueryBlock block,
+ InsertNode node, Stack<LogicalNode> stack) throws PlanningException {
+ super.visitInsert(context, plan, block, node, stack);
+
+ int [] childIds = registerGetChildIds(context, node);
+
+ PlanProto.PersistentStoreNode.Builder persistentStoreBuilder = buildPersistentStoreBuilder(node, childIds);
+ PlanProto.StoreTableNodeSpec.Builder storeTableBuilder = buildStoreTableNodeSpec(node);
+
+ PlanProto.InsertNodeSpec.Builder insertNodeSpec = PlanProto.InsertNodeSpec.newBuilder();
+ insertNodeSpec.setOverwrite(node.isOverwrite());
+ insertNodeSpec.setTableSchema(node.getTableSchema().getProto());
+ if (node.hasProjectedSchema()) {
+ insertNodeSpec.setProjectedSchema(node.getProjectedSchema().getProto());
+ }
+ if (node.hasTargetSchema()) {
+ insertNodeSpec.setTargetSchema(node.getTargetSchema().getProto());
+ }
+ if (node.hasPath()) {
+ insertNodeSpec.setPath(node.getPath().toString());
+ }
+
+ PlanProto.LogicalNode.Builder nodeBuilder = createNodeBuilder(context, node);
+ nodeBuilder.setPersistentStore(persistentStoreBuilder);
+ nodeBuilder.setStoreTable(storeTableBuilder);
+ nodeBuilder.setInsert(insertNodeSpec);
+ context.treeBuilder.addNodes(nodeBuilder);
+
+ return node;
+ }
+
+ /**
+ * Builds the proto fields common to all persistent-store nodes
+ * (CreateTableNode, InsertNode): child link, storage type, table properties.
+ */
+ private static PlanProto.PersistentStoreNode.Builder buildPersistentStoreBuilder(PersistentStoreNode node,
+ int [] childIds) {
+ PlanProto.PersistentStoreNode.Builder persistentStoreBuilder = PlanProto.PersistentStoreNode.newBuilder();
+ persistentStoreBuilder.setChildSeq(childIds[0]);
+ persistentStoreBuilder.setStorageType(node.getStorageType());
+ if (node.hasOptions()) {
+ persistentStoreBuilder.setTableProperties(node.getOptions().getProto());
+ }
+ return persistentStoreBuilder;
+ }
+
+ /** Builds the proto fields common to store-table nodes: partition method and table name. */
+ private static PlanProto.StoreTableNodeSpec.Builder buildStoreTableNodeSpec(StoreTableNode node) {
+ PlanProto.StoreTableNodeSpec.Builder storeTableBuilder = PlanProto.StoreTableNodeSpec.newBuilder();
+ if (node.hasPartition()) {
+ storeTableBuilder.setPartitionMethod(node.getPartitionMethod().getProto());
+ }
+ if (node.hasTableName()) { // It will be false if node is for INSERT INTO LOCATION '...'
+ storeTableBuilder.setTableName(node.getTableName());
+ }
+ return storeTableBuilder;
+ }
+
+ /** Serializes CREATE DATABASE (leaf DDL node: database name and IF NOT EXISTS flag). */
+ @Override
+ public LogicalNode visitCreateDatabase(SerializeContext context, LogicalPlan plan, LogicalPlan.QueryBlock block,
+ CreateDatabaseNode node, Stack<LogicalNode> stack) throws PlanningException {
+ PlanProto.CreateDatabaseNode.Builder createDatabaseBuilder = PlanProto.CreateDatabaseNode.newBuilder();
+ createDatabaseBuilder.setDbName(node.getDatabaseName());
+ createDatabaseBuilder.setIfNotExists(node.isIfNotExists());
+
+ PlanProto.LogicalNode.Builder nodeBuilder = createNodeBuilder(context, node);
+ nodeBuilder.setCreateDatabase(createDatabaseBuilder);
+ context.treeBuilder.addNodes(nodeBuilder);
+
+ return node;
+ }
+
+ /** Serializes DROP DATABASE (leaf DDL node: database name and IF EXISTS flag). */
+ @Override
+ public LogicalNode visitDropDatabase(SerializeContext context, LogicalPlan plan, LogicalPlan.QueryBlock block,
+ DropDatabaseNode node, Stack<LogicalNode> stack) throws PlanningException {
+ PlanProto.DropDatabaseNode.Builder dropDatabaseBuilder = PlanProto.DropDatabaseNode.newBuilder();
+ dropDatabaseBuilder.setDbName(node.getDatabaseName());
+ dropDatabaseBuilder.setIfExists(node.isIfExists());
+
+ PlanProto.LogicalNode.Builder nodeBuilder = createNodeBuilder(context, node);
+ nodeBuilder.setDropDatabase(dropDatabaseBuilder);
+ context.treeBuilder.addNodes(nodeBuilder);
+
+ return node;
+ }
+
+ /**
+ * Maps a NodeType to its proto counterpart by name. Relies on the two enums
+ * keeping identical constant names; a mismatch throws IllegalArgumentException.
+ */
+ public static PlanProto.NodeType convertType(NodeType type) {
+ return PlanProto.NodeType.valueOf(type.name());
+ }
+
+ /**
+ * Maps the algebra-level {@link JoinType} to its protobuf counterpart.
+ * Must stay in sync with the deserializer-side conversion.
+ */
+ public static PlanProto.JoinType convertJoinType(JoinType type) {
+ switch (type) {
+ case CROSS:
+ return PlanProto.JoinType.CROSS_JOIN;
+ case INNER:
+ return PlanProto.JoinType.INNER_JOIN;
+ case LEFT_OUTER:
+ return PlanProto.JoinType.LEFT_OUTER_JOIN;
+ case RIGHT_OUTER:
+ return PlanProto.JoinType.RIGHT_OUTER_JOIN;
+ case FULL_OUTER:
+ return PlanProto.JoinType.FULL_OUTER_JOIN;
+ case LEFT_SEMI:
+ return PlanProto.JoinType.LEFT_SEMI_JOIN;
+ case RIGHT_SEMI:
+ return PlanProto.JoinType.RIGHT_SEMI_JOIN;
+ case LEFT_ANTI:
+ return PlanProto.JoinType.LEFT_ANTI_JOIN;
+ case RIGHT_ANTI:
+ return PlanProto.JoinType.RIGHT_ANTI_JOIN;
+ case UNION:
+ return PlanProto.JoinType.UNION_JOIN;
+ default:
+ throw new RuntimeException("Unknown JoinType: " + type.name());
+ }
+ }
+
+ /** Serializes a single {@link Target}: its expression tree and optional alias. */
+ public static PlanProto.Target convertTarget(Target target) {
+ PlanProto.Target.Builder targetBuilder = PlanProto.Target.newBuilder();
+ targetBuilder.setExpr(EvalNodeSerializer.serialize(target.getEvalTree()));
+ if (target.hasAlias()) {
+ targetBuilder.setAlias(target.getAlias());
+ }
+ return targetBuilder.build();
+ }
+
+ /**
+ * Returns the visit-sequence ids of the given node's children. Since nodes
+ * are visited in postfix order, a child has normally already been registered
+ * by createNodeBuilder(); otherwise a fresh id is assigned here and recorded
+ * so the child reuses the same id when it is serialized later.
+ */
+ private int [] registerGetChildIds(SerializeContext context, LogicalNode node) {
+ int [] childIds = new int[node.childNum()];
+ for (int i = 0; i < node.childNum(); i++) {
+ LogicalNode child = node.getChild(i);
+ if (context.idMap.containsKey(child)) {
+ childIds[i] = context.idMap.get(child);
+ } else {
+ childIds[i] = context.seqId++;
+ // Fix: record the pre-assigned id. Previously it was dropped, so
+ // createNodeBuilder() would later hand the same child a different id,
+ // breaking the parent-child sequence links in the serialized tree.
+ context.idMap.put(child, childIds[i]);
+ }
+ }
+ return childIds;
+ }
+}
http://git-wip-us.apache.org/repos/asf/tajo/blob/32be38d4/tajo-plan/src/main/java/org/apache/tajo/plan/serder/package-info.java
----------------------------------------------------------------------
diff --git a/tajo-plan/src/main/java/org/apache/tajo/plan/serder/package-info.java b/tajo-plan/src/main/java/org/apache/tajo/plan/serder/package-info.java
new file mode 100644
index 0000000..b148fec
--- /dev/null
+++ b/tajo-plan/src/main/java/org/apache/tajo/plan/serder/package-info.java
@@ -0,0 +1,23 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * This package provides a (de)serialization API for logical plans and their related parts.
+ * They employ protocol buffer to (de)serialize logical plans.
+ */
+package org.apache.tajo.plan.serder;
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/tajo/blob/32be38d4/tajo-plan/src/main/java/org/apache/tajo/plan/util/PlannerUtil.java
----------------------------------------------------------------------
diff --git a/tajo-plan/src/main/java/org/apache/tajo/plan/util/PlannerUtil.java b/tajo-plan/src/main/java/org/apache/tajo/plan/util/PlannerUtil.java
index 4672577..d813432 100644
--- a/tajo-plan/src/main/java/org/apache/tajo/plan/util/PlannerUtil.java
+++ b/tajo-plan/src/main/java/org/apache/tajo/plan/util/PlannerUtil.java
@@ -26,10 +26,8 @@ import org.apache.tajo.SessionVars;
import org.apache.tajo.algebra.*;
import org.apache.tajo.annotation.Nullable;
import org.apache.tajo.catalog.*;
-import org.apache.tajo.catalog.proto.CatalogProtos;
import org.apache.tajo.catalog.proto.CatalogProtos.StoreType;
import org.apache.tajo.common.TajoDataTypes.DataType;
-import org.apache.tajo.conf.TajoConf;
import org.apache.tajo.plan.*;
import org.apache.tajo.plan.expr.*;
import org.apache.tajo.plan.logical.*;
@@ -48,6 +46,9 @@ import static org.apache.tajo.catalog.proto.CatalogProtos.StoreType.TEXTFILE;
public class PlannerUtil {
+ public static final Column [] EMPTY_COLUMNS = new Column[] {};
+ public static final AggregationFunctionCallEval [] EMPTY_AGG_FUNCS = new AggregationFunctionCallEval[] {};
+
public static boolean checkIfSetSession(LogicalNode node) {
LogicalNode baseNode = node;
if (node instanceof LogicalRootNode) {
@@ -698,7 +699,7 @@ public class PlannerUtil {
copy.setPID(plan.newPID());
if (node instanceof DistinctGroupbyNode) {
DistinctGroupbyNode dNode = (DistinctGroupbyNode)copy;
- for (GroupbyNode eachNode: dNode.getGroupByNodes()) {
+ for (GroupbyNode eachNode: dNode.getSubPlans()) {
eachNode.setPID(plan.newPID());
}
}
@@ -762,15 +763,6 @@ public class PlannerUtil {
return names;
}
- public static SortSpec[] convertSortSpecs(Collection<CatalogProtos.SortSpecProto> sortSpecProtos) {
- SortSpec[] sortSpecs = new SortSpec[sortSpecProtos.size()];
- int i = 0;
- for (CatalogProtos.SortSpecProto proto : sortSpecProtos) {
- sortSpecs[i++] = new SortSpec(proto);
- }
- return sortSpecs;
- }
-
/**
* Generate an explain string of a LogicalNode and its descendant nodes.
*
http://git-wip-us.apache.org/repos/asf/tajo/blob/32be38d4/tajo-plan/src/main/java/org/apache/tajo/plan/visitor/BasicLogicalPlanVisitor.java
----------------------------------------------------------------------
diff --git a/tajo-plan/src/main/java/org/apache/tajo/plan/visitor/BasicLogicalPlanVisitor.java b/tajo-plan/src/main/java/org/apache/tajo/plan/visitor/BasicLogicalPlanVisitor.java
index d09710e..23c834d 100644
--- a/tajo-plan/src/main/java/org/apache/tajo/plan/visitor/BasicLogicalPlanVisitor.java
+++ b/tajo-plan/src/main/java/org/apache/tajo/plan/visitor/BasicLogicalPlanVisitor.java
@@ -63,7 +63,8 @@ public class BasicLogicalPlanVisitor<CONTEXT, RESULT> implements LogicalPlanVisi
current = visitSetSession(context, plan, block, (SetSessionNode) node, stack);
break;
case EXPRS:
- return null;
+ current = visitEvalExpr(context, plan, block, (EvalExprNode) node, stack);
+ break;
case PROJECTION:
current = visitProjection(context, plan, block, (ProjectionNode) node, stack);
break;
@@ -83,7 +84,7 @@ public class BasicLogicalPlanVisitor<CONTEXT, RESULT> implements LogicalPlanVisi
current = visitWindowAgg(context, plan, block, (WindowAggNode) node, stack);
break;
case DISTINCT_GROUP_BY:
- current = visitDistinct(context, plan, block, (DistinctGroupbyNode) node, stack);
+ current = visitDistinctGroupby(context, plan, block, (DistinctGroupbyNode) node, stack);
break;
case SELECTION:
current = visitFilter(context, plan, block, (SelectionNode) node, stack);
@@ -159,6 +160,12 @@ public class BasicLogicalPlanVisitor<CONTEXT, RESULT> implements LogicalPlanVisi
}
@Override
+ public RESULT visitEvalExpr(CONTEXT context, LogicalPlan plan, LogicalPlan.QueryBlock block, EvalExprNode node,
+ Stack<LogicalNode> stack) throws PlanningException {
+ return null;
+ }
+
+ @Override
public RESULT visitProjection(CONTEXT context, LogicalPlan plan, LogicalPlan.QueryBlock block, ProjectionNode node,
Stack<LogicalNode> stack)
throws PlanningException {
@@ -213,8 +220,8 @@ public class BasicLogicalPlanVisitor<CONTEXT, RESULT> implements LogicalPlanVisi
return result;
}
- public RESULT visitDistinct(CONTEXT context, LogicalPlan plan, LogicalPlan.QueryBlock block, DistinctGroupbyNode node,
- Stack<LogicalNode> stack) throws PlanningException {
+ public RESULT visitDistinctGroupby(CONTEXT context, LogicalPlan plan, LogicalPlan.QueryBlock block,
+ DistinctGroupbyNode node, Stack<LogicalNode> stack) throws PlanningException {
stack.push(node);
RESULT result = visit(context, plan, block, node.getChild(), stack);
stack.pop();
@@ -244,10 +251,17 @@ public class BasicLogicalPlanVisitor<CONTEXT, RESULT> implements LogicalPlanVisi
public RESULT visitUnion(CONTEXT context, LogicalPlan plan, LogicalPlan.QueryBlock block, UnionNode node,
Stack<LogicalNode> stack) throws PlanningException {
stack.push(node);
- LogicalPlan.QueryBlock leftBlock = plan.getBlock(node.getLeftChild());
- RESULT result = visit(context, plan, leftBlock, leftBlock.getRoot(), stack);
- LogicalPlan.QueryBlock rightBlock = plan.getBlock(node.getRightChild());
- visit(context, plan, rightBlock, rightBlock.getRoot(), stack);
+ RESULT result = null;
+ if (plan != null) {
+ LogicalPlan.QueryBlock leftBlock = plan.getBlock(node.getLeftChild());
+ result = visit(context, plan, leftBlock, leftBlock.getRoot(), stack);
+ LogicalPlan.QueryBlock rightBlock = plan.getBlock(node.getRightChild());
+ visit(context, plan, rightBlock, rightBlock.getRoot(), stack);
+ } else {
+ result = visit(context, plan, null, node.getLeftChild(), stack);
+ visit(context, plan, null, node.getRightChild(), stack);
+ }
+
stack.pop();
return result;
}
@@ -276,8 +290,13 @@ public class BasicLogicalPlanVisitor<CONTEXT, RESULT> implements LogicalPlanVisi
public RESULT visitTableSubQuery(CONTEXT context, LogicalPlan plan, LogicalPlan.QueryBlock block,
TableSubQueryNode node, Stack<LogicalNode> stack) throws PlanningException {
stack.push(node);
- LogicalPlan.QueryBlock childBlock = plan.getBlock(node.getSubQuery());
- RESULT result = visit(context, plan, childBlock, childBlock.getRoot(), stack);
+ RESULT result = null;
+ if (plan != null) {
+ LogicalPlan.QueryBlock childBlock = plan.getBlock(node.getSubQuery());
+ result = visit(context, plan, childBlock, childBlock.getRoot(), stack);
+ } else {
+ result = visit(context, plan, null, node.getSubQuery(), stack);
+ }
stack.pop();
return result;
}
http://git-wip-us.apache.org/repos/asf/tajo/blob/32be38d4/tajo-plan/src/main/java/org/apache/tajo/plan/visitor/ExplainLogicalPlanVisitor.java
----------------------------------------------------------------------
diff --git a/tajo-plan/src/main/java/org/apache/tajo/plan/visitor/ExplainLogicalPlanVisitor.java b/tajo-plan/src/main/java/org/apache/tajo/plan/visitor/ExplainLogicalPlanVisitor.java
index 7065295..52db8eb 100644
--- a/tajo-plan/src/main/java/org/apache/tajo/plan/visitor/ExplainLogicalPlanVisitor.java
+++ b/tajo-plan/src/main/java/org/apache/tajo/plan/visitor/ExplainLogicalPlanVisitor.java
@@ -117,8 +117,9 @@ public class ExplainLogicalPlanVisitor extends BasicLogicalPlanVisitor<ExplainLo
return visitUnaryNode(context, plan, block, node, stack);
}
- public LogicalNode visitDistinct(Context context, LogicalPlan plan, LogicalPlan.QueryBlock block, DistinctGroupbyNode node,
- Stack<LogicalNode> stack) throws PlanningException {
+ public LogicalNode visitDistinctGroupby(Context context, LogicalPlan plan, LogicalPlan.QueryBlock block,
+ DistinctGroupbyNode node,
+ Stack<LogicalNode> stack) throws PlanningException {
return visitUnaryNode(context, plan, block, node, stack);
}
http://git-wip-us.apache.org/repos/asf/tajo/blob/32be38d4/tajo-plan/src/main/java/org/apache/tajo/plan/visitor/LogicalPlanVisitor.java
----------------------------------------------------------------------
diff --git a/tajo-plan/src/main/java/org/apache/tajo/plan/visitor/LogicalPlanVisitor.java b/tajo-plan/src/main/java/org/apache/tajo/plan/visitor/LogicalPlanVisitor.java
index 6a0c338..5be2eec 100644
--- a/tajo-plan/src/main/java/org/apache/tajo/plan/visitor/LogicalPlanVisitor.java
+++ b/tajo-plan/src/main/java/org/apache/tajo/plan/visitor/LogicalPlanVisitor.java
@@ -32,6 +32,9 @@ public interface LogicalPlanVisitor<CONTEXT, RESULT> {
RESULT visitSetSession(CONTEXT context, LogicalPlan plan, LogicalPlan.QueryBlock block, SetSessionNode node,
Stack<LogicalNode> stack) throws PlanningException;
+ RESULT visitEvalExpr(CONTEXT context, LogicalPlan plan, LogicalPlan.QueryBlock block, EvalExprNode node,
+ Stack<LogicalNode> stack) throws PlanningException;
+
RESULT visitProjection(CONTEXT context, LogicalPlan plan, LogicalPlan.QueryBlock block, ProjectionNode node,
Stack<LogicalNode> stack) throws PlanningException;
@@ -48,8 +51,8 @@ public interface LogicalPlanVisitor<CONTEXT, RESULT> {
Stack<LogicalNode> stack) throws PlanningException;
RESULT visitWindowAgg(CONTEXT context, LogicalPlan plan, LogicalPlan.QueryBlock block, WindowAggNode node,
Stack<LogicalNode> stack) throws PlanningException;
- RESULT visitDistinct(CONTEXT context, LogicalPlan plan, LogicalPlan.QueryBlock block, DistinctGroupbyNode node,
- Stack<LogicalNode> stack) throws PlanningException;
+ RESULT visitDistinctGroupby(CONTEXT context, LogicalPlan plan, LogicalPlan.QueryBlock block, DistinctGroupbyNode node,
+ Stack<LogicalNode> stack) throws PlanningException;
RESULT visitFilter(CONTEXT context, LogicalPlan plan, LogicalPlan.QueryBlock block, SelectionNode node,
Stack<LogicalNode> stack) throws PlanningException;
http://git-wip-us.apache.org/repos/asf/tajo/blob/32be38d4/tajo-plan/src/main/proto/Plan.proto
----------------------------------------------------------------------
diff --git a/tajo-plan/src/main/proto/Plan.proto b/tajo-plan/src/main/proto/Plan.proto
index 0f82d87..3e4f07c 100644
--- a/tajo-plan/src/main/proto/Plan.proto
+++ b/tajo-plan/src/main/proto/Plan.proto
@@ -26,58 +26,280 @@ import "CatalogProtos.proto";
import "DataTypes.proto";
enum NodeType {
- BST_INDEX_SCAN = 0;
- EXCEPT = 1;
+ SET_SESSION = 0;
+
+ ROOT = 1;
EXPRS = 2;
- DISTINCT_GROUP_BY = 3;
- GROUP_BY = 4;
- HAVING = 5;
- JOIN = 6;
- INSERT = 7;
- INTERSECT = 8;
- LIMIT = 9;
- PARTITIONS_SCAN = 10;
- PROJECTION = 11;
- ROOT = 12;
- SCAN = 13;
- SELECTION = 14;
- SORT = 15;
- STORE = 16;
- TABLE_SUBQUERY = 17;
- UNION = 18;
- WINDOW_AGG = 19;
-
- CREATE_DATABASE = 20;
- DROP_DATABASE = 21;
- CREATE_TABLE = 22;
- DROP_TABLE = 23;
- ALTER_TABLESPACE = 24;
- ALTER_TABLE = 25;
- TRUNCATE_TABLE = 26;
-}
-
-message LogicalPlan {
- required KeyValueSetProto adjacentList = 1;
+ PROJECTION = 3;
+ LIMIT = 4;
+ WINDOW_AGG = 5;
+ SORT = 6;
+ HAVING = 7;
+ GROUP_BY = 8;
+ DISTINCT_GROUP_BY = 9;
+ SELECTION = 10;
+ JOIN = 11;
+ UNION = 12;
+ INTERSECT = 13;
+ EXCEPT = 14;
+ TABLE_SUBQUERY = 15;
+ SCAN = 16;
+ PARTITIONS_SCAN = 17;
+ BST_INDEX_SCAN = 18;
+ STORE = 19;
+ INSERT = 20;
+
+ CREATE_DATABASE = 21;
+ DROP_DATABASE = 22;
+ CREATE_TABLE = 23;
+ DROP_TABLE = 24;
+ ALTER_TABLESPACE = 25;
+ ALTER_TABLE = 26;
+ TRUNCATE_TABLE = 27;
}
-message LogicalNode {
- required int32 pid = 1;
- required NodeType type = 2;
- required SchemaProto in_schema = 3;
- required SchemaProto out_schema = 4;
- required NodeSpec spec = 5;
+message LogicalNodeTree {
+ repeated LogicalNode nodes = 1;
}
-message NodeSpec {
- optional ScanNode scan = 1;
+message LogicalNode {
+ required int32 visitSeq = 1;
+ required int32 nodeId = 2;
+ required NodeType type = 3;
+ optional SchemaProto in_schema = 4;
+ optional SchemaProto out_schema = 5;
+
+ optional ScanNode scan = 6;
+ optional PartitionScanSpec partitionScan = 7;
+ optional JoinNode join = 8;
+ optional FilterNode filter = 9;
+ optional GroupbyNode groupby = 10;
+ optional DistinctGroupbyNode distinctGroupby = 11;
+ optional SortNode sort = 12;
+ optional LimitNode limit = 13;
+ optional WindowAggNode windowAgg = 14;
+ optional ProjectionNode projection = 15;
+ optional EvalExprNode exprEval = 16;
+ optional UnionNode union = 17;
+ optional TableSubQueryNode tableSubQuery = 18;
+ optional PersistentStoreNode persistentStore = 19;
+ optional StoreTableNodeSpec storeTable = 20;
+ optional InsertNodeSpec insert = 21;
+ optional CreateTableNodeSpec createTable = 22;
+ optional RootNode root = 23;
+ optional SetSessionNode setSession = 24;
+
+ optional CreateDatabaseNode createDatabase = 25;
+ optional DropDatabaseNode dropDatabase = 26;
+ optional DropTableNode dropTable = 27;
+
+ optional AlterTablespaceNode alterTablespace = 28;
+ optional AlterTableNode alterTable = 29;
+ optional TruncateTableNode truncateTableNode = 30;
}
message ScanNode {
required TableDescProto table = 1;
optional string alias = 2;
- required SchemaProto schema = 3;
+ required bool existTargets = 3;
+ repeated Target targets = 4;
+ optional EvalNodeTree qual = 5;
+}
+
+message PartitionScanSpec {
+ repeated string paths = 1;
+}
+
+message FilterNode {
+ required int32 childSeq = 1;
+ required EvalNodeTree qual = 2;
+}
+
+message JoinNode {
+ required int32 leftChildSeq = 1;
+ required int32 rightChilSeq = 2;
+ required JoinType joinType = 3;
+ optional EvalNodeTree joinQual = 4;
+ required bool existsTargets = 5;
+ repeated Target targets = 6;
+}
+
+message GroupbyNode {
+ required int32 childSeq = 1;
+ required bool distinct = 2;
+ repeated ColumnProto groupingKeys = 3;
+ repeated EvalNodeTree aggFunctions = 4;
+ repeated Target targets = 5;
+}
+
+message DistinctGroupbyNode {
+ required int32 childSeq = 1;
+ optional LogicalNode groupbyNode = 2;
+ repeated LogicalNode subPlans = 3;
+ repeated Target targets = 4;
+ repeated ColumnProto groupingKeys = 5;
+ repeated int32 resultId = 6;
+ repeated EvalNodeTree aggFunctions = 7;
+}
+
+message SortNode {
+ required int32 childSeq = 1;
+ repeated SortSpecProto sortSpecs = 2;
+}
+
+message LimitNode {
+ required int32 childSeq = 1;
+ required int64 fetchFirstNum = 2;
+}
+
+message WindowAggNode {
+ required int32 childSeq = 1;
+ repeated ColumnProto partitionKeys = 2;
+ repeated SortSpecProto sortSpecs = 3;
+ repeated EvalNodeTree windowFunctions = 4;
+ required bool distinct = 5; // if distinct aggregation function is included in window function
+ repeated Target targets = 6;
+}
+
+message UnionNode {
+ required int32 leftChildSeq = 1;
+ required int32 rightChildSeq = 2;
+ required bool all = 3;
+}
+
+message TableSubQueryNode {
+ required int32 childSeq = 1;
+ required string tableName = 2;
+ repeated Target targets = 3;
+}
+
+message ProjectionNode {
+ required int32 childSeq = 1;
+ required bool distinct = 2;
+ repeated Target targets = 3;
+}
+
+message EvalExprNode {
+ repeated Target targets = 1;
+}
+
+message RootNode {
+ required int32 childSeq = 1;
+}
+
+message SetSessionNode {
+ required string name = 1;
+ optional string value = 2;
+}
+
+message Target {
+ required EvalNodeTree expr = 1;
+ optional string alias = 2;
}
+enum JoinType {
+ CROSS_JOIN = 0;
+ INNER_JOIN = 1;
+ LEFT_OUTER_JOIN = 2;
+ RIGHT_OUTER_JOIN = 3;
+ FULL_OUTER_JOIN = 4;
+ UNION_JOIN = 5;
+ LEFT_ANTI_JOIN = 6;
+ RIGHT_ANTI_JOIN = 7;
+ LEFT_SEMI_JOIN = 8;
+ RIGHT_SEMI_JOIN = 9;
+}
+
+message PartitionTableScanSpec {
+ repeated string paths = 1;
+}
+
+message PersistentStoreNode {
+ required int32 childSeq = 1;
+ required StoreType storageType = 2;
+ required KeyValueSetProto tableProperties = 3;
+}
+
+message StoreTableNodeSpec { // required PersistentStoreSpec
+ optional string tableName = 1; // 'INSERT INTO LOCATION' does not require 'table name'.
+ optional PartitionMethodProto partitionMethod = 2;
+}
+
+message InsertNodeSpec { // required PersistentStoreSpec and StoreTableSpec
+ required bool overwrite = 1;
+ required SchemaProto tableSchema = 2;
+ optional SchemaProto targetSchema = 4;
+ optional SchemaProto projectedSchema = 3;
+ optional string path = 5;
+}
+
+message CreateTableNodeSpec { // required PersistentStoreSpec and StoreTableNodeSpec
+ required SchemaProto schema = 1;
+ required bool external = 2;
+ required bool ifNotExists = 3;
+ optional string path = 4;
+}
+
+message DropTableNode {
+ required string tableName = 1;
+ required bool ifExists = 2;
+ required bool purge = 3;
+}
+
+message TruncateTableNode {
+ repeated string tableNames = 1;
+}
+
+message CreateDatabaseNode {
+ required string dbName = 1;
+ required bool ifNotExists = 2;
+}
+
+message DropDatabaseNode {
+ required string dbName = 1;
+ required bool ifExists = 2;
+}
+
+message AlterTablespaceNode {
+ enum Type {
+ LOCATION = 0;
+ }
+
+ message SetLocation {
+ required string location = 1;
+ }
+
+ required string tableSpaceName = 1;
+ required Type setType = 2;
+ optional SetLocation setLocation = 3;
+}
+
+message AlterTableNode {
+ enum Type {
+ RENAME_TABLE = 0;
+ RENAME_COLUMN = 1;
+ ADD_COLUMN = 2;
+ }
+
+ message RenameTable {
+ required string newName = 1;
+ }
+
+ message RenameColumn {
+ required string oldName = 1;
+ required string newName = 2;
+ }
+
+ message AddColumn {
+ required ColumnProto addColumn = 1;
+ }
+
+ required string tableName = 1;
+ required Type setType = 2;
+ optional RenameTable renameTable = 3;
+ optional RenameColumn renameColumn = 4;
+ optional AddColumn addColumn = 5;
+}
enum EvalType {
NOT = 0;
@@ -126,7 +348,7 @@ enum EvalType {
CONST = 33;
}
-message EvalTree {
+message EvalNodeTree {
repeated EvalNode nodes = 1;
}
@@ -140,10 +362,13 @@ message EvalNode {
optional ConstEval const = 6;
optional ColumnProto field = 7; // field eval
optional FunctionEval function = 8;
- optional RowConstEval rowConst = 9;
- optional BetweenEval between = 10;
- optional CaseWhenEval casewhen = 11;
- optional IfCondEval ifCond = 12;
+ optional AggFunctionEvalSpec aggFunction = 9;
+ optional WinFunctionEvalSpec winFunction = 10;
+ optional RowConstEval rowConst = 11;
+ optional BetweenEval between = 12;
+ optional CaseWhenEval casewhen = 13;
+ optional IfCondEval ifCond = 14;
+ optional PatternMatchEvalSpec patternMatch = 15;
}
message UnaryEval {
@@ -159,6 +384,10 @@ message BinaryEval {
optional bool negative = 3 [default = false];
}
+message PatternMatchEvalSpec { // requires BinaryEval
+ optional bool caseSensitive = 1;
+}
+
message BetweenEval {
required int32 predicand = 1;
required int32 begin = 2;
@@ -190,6 +419,50 @@ message FunctionEval {
repeated int32 paramIds = 2;
}
+message AggFunctionEvalSpec { // requires FunctionEval
+ required bool intermediatePhase = 1;
+ required bool finalPhase = 2;
+ optional string alias = 3;
+}
+
+message WinFunctionEvalSpec {
+ message WindowFrame {
+ required WindowStartBound startBound = 1;
+ required WindowEndBound endBound = 2;
+ optional WindowFrameUnit unit = 3;
+ }
+
+ enum WindowFrameStartBoundType {
+ S_UNBOUNDED_PRECEDING = 0;
+ S_CURRENT_ROW = 1;
+ S_PRECEDING = 2;
+ }
+
+ enum WindowFrameEndBoundType {
+ E_UNBOUNDED_FOLLOWING = 0;
+ E_CURRENT_ROW = 1;
+ E_FOLLOWING = 2;
+ }
+
+ enum WindowFrameUnit {
+ ROW = 0;
+ RANGE = 1;
+ }
+
+ message WindowStartBound {
+ required WindowFrameStartBoundType boundType = 1;
+ optional EvalNodeTree number = 2;
+ }
+
+ message WindowEndBound {
+ required WindowFrameEndBoundType boundType = 1;
+ optional EvalNodeTree number = 2;
+ }
+
+ repeated SortSpecProto sortSpec = 1;
+ required WindowFrame windowFrame = 2;
+}
+
message Datum {
required Type type = 1;
optional bool boolean = 2;
http://git-wip-us.apache.org/repos/asf/tajo/blob/32be38d4/tajo-storage/tajo-storage-common/src/main/java/org/apache/tajo/storage/StorageManager.java
----------------------------------------------------------------------
diff --git a/tajo-storage/tajo-storage-common/src/main/java/org/apache/tajo/storage/StorageManager.java b/tajo-storage/tajo-storage-common/src/main/java/org/apache/tajo/storage/StorageManager.java
index 609ca20..c9f493d 100644
--- a/tajo-storage/tajo-storage-common/src/main/java/org/apache/tajo/storage/StorageManager.java
+++ b/tajo-storage/tajo-storage-common/src/main/java/org/apache/tajo/storage/StorageManager.java
@@ -38,7 +38,7 @@ import org.apache.tajo.plan.LogicalPlan;
import org.apache.tajo.plan.logical.LogicalNode;
import org.apache.tajo.plan.logical.NodeType;
import org.apache.tajo.plan.logical.ScanNode;
-import org.apache.tajo.plan.rewrite.RewriteRule;
+import org.apache.tajo.plan.rewrite.LogicalPlanRewriteRule;
import org.apache.tajo.storage.fragment.Fragment;
import org.apache.tajo.storage.fragment.FragmentConvertor;
import org.apache.tajo.util.TUtil;
@@ -601,7 +601,7 @@ public abstract class StorageManager {
* @return The list of storage specified rewrite rules
* @throws java.io.IOException
*/
- public List<RewriteRule> getRewriteRules(OverridableConf queryContext, TableDesc tableDesc) throws IOException {
+ public List<LogicalPlanRewriteRule> getRewriteRules(OverridableConf queryContext, TableDesc tableDesc) throws IOException {
return null;
}
http://git-wip-us.apache.org/repos/asf/tajo/blob/32be38d4/tajo-storage/tajo-storage-hbase/src/main/java/org/apache/tajo/storage/hbase/AddSortForInsertRewriter.java
----------------------------------------------------------------------
diff --git a/tajo-storage/tajo-storage-hbase/src/main/java/org/apache/tajo/storage/hbase/AddSortForInsertRewriter.java b/tajo-storage/tajo-storage-hbase/src/main/java/org/apache/tajo/storage/hbase/AddSortForInsertRewriter.java
index 79161cc..e95aeec 100644
--- a/tajo-storage/tajo-storage-hbase/src/main/java/org/apache/tajo/storage/hbase/AddSortForInsertRewriter.java
+++ b/tajo-storage/tajo-storage-hbase/src/main/java/org/apache/tajo/storage/hbase/AddSortForInsertRewriter.java
@@ -18,6 +18,7 @@
package org.apache.tajo.storage.hbase;
+import org.apache.tajo.OverridableConf;
import org.apache.tajo.catalog.Column;
import org.apache.tajo.catalog.Schema;
import org.apache.tajo.catalog.SortSpec;
@@ -30,12 +31,13 @@ import org.apache.tajo.plan.logical.LogicalRootNode;
import org.apache.tajo.plan.logical.SortNode;
import org.apache.tajo.plan.logical.SortNode.SortPurpose;
import org.apache.tajo.plan.logical.UnaryNode;
-import org.apache.tajo.plan.rewrite.RewriteRule;
+import org.apache.tajo.plan.rewrite.LogicalPlanRewriteRule;
import org.apache.tajo.plan.util.PlannerUtil;
-public class AddSortForInsertRewriter implements RewriteRule {
+public class AddSortForInsertRewriter implements LogicalPlanRewriteRule {
private int[] sortColumnIndexes;
private Column[] sortColumns;
+
public AddSortForInsertRewriter(TableDesc tableDesc, Column[] sortColumns) {
this.sortColumns = sortColumns;
this.sortColumnIndexes = new int[sortColumns.length];
@@ -52,13 +54,13 @@ public class AddSortForInsertRewriter implements RewriteRule {
}
@Override
- public boolean isEligible(LogicalPlan plan) {
+ public boolean isEligible(OverridableConf queryContext, LogicalPlan plan) {
StoreType storeType = PlannerUtil.getStoreType(plan);
return storeType != null;
}
@Override
- public LogicalPlan rewrite(LogicalPlan plan) throws PlanningException {
+ public LogicalPlan rewrite(OverridableConf queryContext, LogicalPlan plan) throws PlanningException {
LogicalRootNode rootNode = plan.getRootBlock().getRoot();
UnaryNode insertNode = rootNode.getChild();
LogicalNode childNode = insertNode.getChild();
http://git-wip-us.apache.org/repos/asf/tajo/blob/32be38d4/tajo-storage/tajo-storage-hbase/src/main/java/org/apache/tajo/storage/hbase/HBaseStorageManager.java
----------------------------------------------------------------------
diff --git a/tajo-storage/tajo-storage-hbase/src/main/java/org/apache/tajo/storage/hbase/HBaseStorageManager.java b/tajo-storage/tajo-storage-hbase/src/main/java/org/apache/tajo/storage/hbase/HBaseStorageManager.java
index de4b4cb..c606e88 100644
--- a/tajo-storage/tajo-storage-hbase/src/main/java/org/apache/tajo/storage/hbase/HBaseStorageManager.java
+++ b/tajo-storage/tajo-storage-hbase/src/main/java/org/apache/tajo/storage/hbase/HBaseStorageManager.java
@@ -49,7 +49,7 @@ import org.apache.tajo.plan.logical.CreateTableNode;
import org.apache.tajo.plan.logical.LogicalNode;
import org.apache.tajo.plan.logical.NodeType;
import org.apache.tajo.plan.logical.ScanNode;
-import org.apache.tajo.plan.rewrite.RewriteRule;
+import org.apache.tajo.plan.rewrite.LogicalPlanRewriteRule;
import org.apache.tajo.storage.*;
import org.apache.tajo.storage.fragment.Fragment;
import org.apache.tajo.util.Bytes;
@@ -1050,9 +1050,9 @@ public class HBaseStorageManager extends StorageManager {
}
}
- public List<RewriteRule> getRewriteRules(OverridableConf queryContext, TableDesc tableDesc) throws IOException {
+ public List<LogicalPlanRewriteRule> getRewriteRules(OverridableConf queryContext, TableDesc tableDesc) throws IOException {
if ("false".equalsIgnoreCase(queryContext.get(HBaseStorageConstants.INSERT_PUT_MODE, "false"))) {
- List<RewriteRule> rules = new ArrayList<RewriteRule>();
+ List<LogicalPlanRewriteRule> rules = new ArrayList<LogicalPlanRewriteRule>();
rules.add(new AddSortForInsertRewriter(tableDesc, getIndexColumns(tableDesc)));
return rules;
} else {
[8/8] tajo git commit: Merge branch 'master' of
https://git-wip-us.apache.org/repos/asf/tajo into index_support
Posted by ji...@apache.org.
Merge branch 'master' of https://git-wip-us.apache.org/repos/asf/tajo into index_support
Conflicts:
tajo-catalog/tajo-catalog-server/src/main/java/org/apache/tajo/catalog/CatalogServer.java
tajo-plan/src/main/java/org/apache/tajo/plan/LogicalPlanner.java
Project: http://git-wip-us.apache.org/repos/asf/tajo/repo
Commit: http://git-wip-us.apache.org/repos/asf/tajo/commit/8e52ed43
Tree: http://git-wip-us.apache.org/repos/asf/tajo/tree/8e52ed43
Diff: http://git-wip-us.apache.org/repos/asf/tajo/diff/8e52ed43
Branch: refs/heads/index_support
Commit: 8e52ed43a72a51f78dce1547cd642a24aa860c58
Parents: df2ff2d 5a97079
Author: Jihoon Son <ji...@apache.org>
Authored: Wed Dec 31 19:51:10 2014 +0900
Committer: Jihoon Son <ji...@apache.org>
Committed: Wed Dec 31 19:51:10 2014 +0900
----------------------------------------------------------------------
CHANGES | 5 +
.../tajo/catalog/AbstractCatalogClient.java | 152 ++++
.../src/main/proto/CatalogProtocol.proto | 9 +-
.../org/apache/tajo/catalog/CatalogService.java | 37 +-
.../java/org/apache/tajo/catalog/Schema.java | 33 +-
.../java/org/apache/tajo/catalog/TableDesc.java | 2 +-
.../src/main/proto/CatalogProtos.proto | 81 ++-
.../tajo/catalog/store/HCatalogStore.java | 53 +-
.../org/apache/tajo/catalog/CatalogServer.java | 247 +++++--
.../dictionary/AbstractTableDescriptor.java | 90 +++
.../catalog/dictionary/ColumnDescriptor.java | 47 ++
.../dictionary/ColumnsTableDescriptor.java | 48 ++
.../dictionary/DatabasesTableDescriptor.java | 47 ++
.../dictionary/IndexesTableDescriptor.java | 52 ++
.../InfoSchemaMetadataDictionary.java | 124 ++++
.../dictionary/PartitionsTableDescriptor.java | 48 ++
.../catalog/dictionary/TableDescriptor.java | 29 +
.../dictionary/TableOptionsTableDescriptor.java | 46 ++
.../dictionary/TableStatsTableDescriptor.java | 46 ++
.../dictionary/TablesTableDescriptor.java | 49 ++
.../dictionary/TablespacesTableDescriptor.java | 48 ++
.../tajo/catalog/store/AbstractDBStore.java | 295 ++++++++
.../apache/tajo/catalog/store/CatalogStore.java | 24 +
.../org/apache/tajo/catalog/store/MemStore.java | 189 +++++
.../org/apache/tajo/catalog/TestCatalog.java | 15 +-
.../java/org/apache/tajo/conf/TajoConf.java | 20 +
.../java/org/apache/tajo/util/ProtoUtil.java | 19 +
.../org/apache/tajo/util/ReflectionUtil.java | 61 +-
.../main/java/org/apache/tajo/util/TUtil.java | 41 ++
.../engine/codegen/ExecutorPreCompiler.java | 8 +-
.../engine/planner/PhysicalPlannerImpl.java | 17 +-
.../tajo/engine/planner/enforce/Enforcer.java | 12 +-
.../engine/planner/global/GlobalPlanner.java | 36 +-
.../global/builder/DistinctGroupbyBuilder.java | 32 +-
.../BaseGlobalPlanRewriteRuleProvider.java | 39 +
.../rewriter/GlobalPlanRewriteEngine.java | 84 +++
.../global/rewriter/GlobalPlanRewriteRule.java | 49 ++
.../rewriter/GlobalPlanRewriteRuleProvider.java | 33 +
.../rewriter/GlobalPlanTestRuleProvider.java | 44 ++
.../rules/GlobalPlanEqualityTester.java | 63 ++
.../DistinctGroupbyFirstAggregationExec.java | 2 +-
.../DistinctGroupbyHashAggregationExec.java | 4 +-
.../DistinctGroupbySecondAggregationExec.java | 2 +-
.../DistinctGroupbySortAggregationExec.java | 4 +-
.../DistinctGroupbyThirdAggregationExec.java | 2 +-
.../apache/tajo/engine/query/TaskRequest.java | 3 +-
.../tajo/engine/query/TaskRequestImpl.java | 28 +-
.../utils/test/ErrorInjectionRewriter.java | 10 +-
.../tajo/master/DefaultTaskScheduler.java | 12 +-
.../org/apache/tajo/master/GlobalEngine.java | 1 +
.../NonForwardQueryResultFileScanner.java | 164 +++++
.../master/NonForwardQueryResultScanner.java | 148 +---
.../NonForwardQueryResultSystemScanner.java | 616 ++++++++++++++++
.../tajo/master/TajoMasterClientService.java | 2 +-
.../apache/tajo/master/exec/QueryExecutor.java | 33 +-
.../master/querymaster/QueryMasterTask.java | 9 +-
.../main/java/org/apache/tajo/worker/Task.java | 4 +-
.../src/main/proto/TajoWorkerProtocol.proto | 14 +-
.../org/apache/tajo/TajoTestingCluster.java | 15 +-
.../apache/tajo/engine/eval/ExprTestBase.java | 14 +-
.../tajo/engine/query/TestGroupByQuery.java | 59 +-
.../tajo/engine/query/TestSelectQuery.java | 27 +-
.../tajo/engine/query/TestTruncateTable.java | 8 +-
.../tajo/engine/query/TestWindowQuery.java | 6 +-
.../apache/tajo/master/TestGlobalPlanner.java | 6 +-
.../TestNonForwardQueryResultSystemScanner.java | 296 ++++++++
.../org/apache/tajo/plan/LogicalOptimizer.java | 52 +-
.../tajo/plan/LogicalPlanPreprocessor.java | 4 +-
.../org/apache/tajo/plan/LogicalPlanner.java | 16 +-
.../main/java/org/apache/tajo/plan/Target.java | 13 +-
.../plan/expr/AggregationFunctionCallEval.java | 38 +-
.../org/apache/tajo/plan/expr/EvalNode.java | 10 +-
.../tajo/plan/expr/WindowFunctionEval.java | 12 +
.../tajo/plan/logical/AlterTableNode.java | 10 +
.../tajo/plan/logical/AlterTablespaceNode.java | 13 +-
.../apache/tajo/plan/logical/BinaryNode.java | 16 +
.../tajo/plan/logical/CreateDatabaseNode.java | 10 +
.../tajo/plan/logical/CreateTableNode.java | 11 +-
.../tajo/plan/logical/DistinctGroupbyNode.java | 39 +-
.../tajo/plan/logical/DropDatabaseNode.java | 17 +-
.../apache/tajo/plan/logical/DropTableNode.java | 10 +
.../apache/tajo/plan/logical/EvalExprNode.java | 12 +-
.../apache/tajo/plan/logical/GroupbyNode.java | 47 +-
.../apache/tajo/plan/logical/InsertNode.java | 15 +-
.../apache/tajo/plan/logical/LogicalNode.java | 16 +-
.../org/apache/tajo/plan/logical/NodeType.java | 6 +-
.../tajo/plan/logical/ProjectionNode.java | 12 +-
.../apache/tajo/plan/logical/RelationNode.java | 2 +-
.../org/apache/tajo/plan/logical/ScanNode.java | 13 +-
.../tajo/plan/logical/SetSessionNode.java | 22 +-
.../tajo/plan/logical/StoreTableNode.java | 11 +-
.../tajo/plan/logical/TableSubQueryNode.java | 12 +-
.../tajo/plan/logical/TruncateTableNode.java | 10 +
.../org/apache/tajo/plan/logical/UnaryNode.java | 14 +
.../apache/tajo/plan/logical/WindowSpec.java | 57 +-
.../tajo/plan/nameresolver/NameResolver.java | 6 +-
.../plan/nameresolver/ResolverByLegacy.java | 2 +-
.../rewrite/BaseLogicalPlanRewriteEngine.java | 89 +++
.../BaseLogicalPlanRewriteRuleProvider.java | 59 ++
.../plan/rewrite/BasicQueryRewriteEngine.java | 72 --
.../plan/rewrite/LogicalPlanRewriteEngine.java | 33 +
.../plan/rewrite/LogicalPlanRewriteRule.java | 57 ++
.../rewrite/LogicalPlanRewriteRuleProvider.java | 44 ++
.../rewrite/LogicalPlanTestRuleProvider.java | 44 ++
.../tajo/plan/rewrite/QueryRewriteEngine.java | 32 -
.../apache/tajo/plan/rewrite/RewriteRule.java | 56 --
.../plan/rewrite/rules/FilterPushDownRule.java | 9 +-
.../rules/LogicalPlanEqualityTester.java | 55 ++
.../rewrite/rules/PartitionedTableRewriter.java | 44 +-
.../rewrite/rules/ProjectionPushDownRule.java | 11 +-
.../tajo/plan/serder/EvalNodeDeserializer.java | 301 ++++++++
.../tajo/plan/serder/EvalNodeSerializer.java | 397 ++++++++++
.../plan/serder/EvalTreeProtoDeserializer.java | 218 ------
.../plan/serder/EvalTreeProtoSerializer.java | 310 --------
.../plan/serder/LogicalNodeDeserializer.java | 678 +++++++++++++++++
.../tajo/plan/serder/LogicalNodeSerializer.java | 724 +++++++++++++++++++
.../apache/tajo/plan/serder/package-info.java | 23 +
.../org/apache/tajo/plan/util/PlannerUtil.java | 37 +-
.../plan/visitor/BasicLogicalPlanVisitor.java | 39 +-
.../plan/visitor/ExplainLogicalPlanVisitor.java | 5 +-
.../tajo/plan/visitor/LogicalPlanVisitor.java | 7 +-
tajo-plan/src/main/proto/Plan.proto | 363 ++++++++--
.../org/apache/tajo/storage/StorageManager.java | 4 +-
.../storage/hbase/AddSortForInsertRewriter.java | 10 +-
.../tajo/storage/hbase/HBaseStorageManager.java | 6 +-
125 files changed, 6820 insertions(+), 1298 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/tajo/blob/8e52ed43/tajo-catalog/tajo-catalog-common/src/main/proto/CatalogProtos.proto
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/tajo/blob/8e52ed43/tajo-catalog/tajo-catalog-server/src/main/java/org/apache/tajo/catalog/CatalogServer.java
----------------------------------------------------------------------
diff --cc tajo-catalog/tajo-catalog-server/src/main/java/org/apache/tajo/catalog/CatalogServer.java
index 1912cde,30b1767..3f4d38d
--- a/tajo-catalog/tajo-catalog-server/src/main/java/org/apache/tajo/catalog/CatalogServer.java
+++ b/tajo-catalog/tajo-catalog-server/src/main/java/org/apache/tajo/catalog/CatalogServer.java
@@@ -728,9 -869,9 +869,9 @@@ public class CatalogServer extends Abst
rlock.lock();
try {
if (store.existIndexByName(
- indexDesc.getTableIdentifier().getDatabaseName(),
+ databaseName,
- indexDesc.getIndexName())) {
- throw new AlreadyExistsIndexException(indexDesc.getIndexName());
+ indexDesc.getName())) {
+ throw new AlreadyExistsIndexException(indexDesc.getName());
}
store.createIndex(indexDesc);
} catch (Exception e) {
http://git-wip-us.apache.org/repos/asf/tajo/blob/8e52ed43/tajo-catalog/tajo-catalog-server/src/main/java/org/apache/tajo/catalog/store/AbstractDBStore.java
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/tajo/blob/8e52ed43/tajo-catalog/tajo-catalog-server/src/main/java/org/apache/tajo/catalog/store/MemStore.java
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/tajo/blob/8e52ed43/tajo-catalog/tajo-catalog-server/src/test/java/org/apache/tajo/catalog/TestCatalog.java
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/tajo/blob/8e52ed43/tajo-core/src/main/java/org/apache/tajo/engine/planner/PhysicalPlannerImpl.java
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/tajo/blob/8e52ed43/tajo-core/src/main/java/org/apache/tajo/engine/planner/global/GlobalPlanner.java
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/tajo/blob/8e52ed43/tajo-plan/src/main/java/org/apache/tajo/plan/LogicalPlanPreprocessor.java
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/tajo/blob/8e52ed43/tajo-plan/src/main/java/org/apache/tajo/plan/LogicalPlanner.java
----------------------------------------------------------------------
diff --cc tajo-plan/src/main/java/org/apache/tajo/plan/LogicalPlanner.java
index 2646c18,9002f28..3604e06
--- a/tajo-plan/src/main/java/org/apache/tajo/plan/LogicalPlanner.java
+++ b/tajo-plan/src/main/java/org/apache/tajo/plan/LogicalPlanner.java
@@@ -37,10 -37,8 +37,11 @@@ import org.apache.tajo.algebra.WindowSp
import org.apache.tajo.catalog.*;
import org.apache.tajo.catalog.partition.PartitionMethodDesc;
import org.apache.tajo.catalog.proto.CatalogProtos;
+import org.apache.tajo.catalog.proto.CatalogProtos.IndexMethod;
+ import org.apache.tajo.catalog.proto.CatalogProtos.StoreType;
import org.apache.tajo.common.TajoDataTypes;
+import org.apache.tajo.conf.TajoConf;
+import org.apache.tajo.conf.TajoConf.ConfVars;
import org.apache.tajo.datum.NullDatum;
import org.apache.tajo.plan.LogicalPlan.QueryBlock;
import org.apache.tajo.plan.algebra.BaseAlgebraVisitor;
http://git-wip-us.apache.org/repos/asf/tajo/blob/8e52ed43/tajo-plan/src/main/java/org/apache/tajo/plan/logical/NodeType.java
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/tajo/blob/8e52ed43/tajo-plan/src/main/java/org/apache/tajo/plan/rewrite/rules/ProjectionPushDownRule.java
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/tajo/blob/8e52ed43/tajo-plan/src/main/java/org/apache/tajo/plan/visitor/BasicLogicalPlanVisitor.java
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/tajo/blob/8e52ed43/tajo-plan/src/main/java/org/apache/tajo/plan/visitor/ExplainLogicalPlanVisitor.java
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/tajo/blob/8e52ed43/tajo-plan/src/main/java/org/apache/tajo/plan/visitor/LogicalPlanVisitor.java
----------------------------------------------------------------------
[7/8] tajo git commit: added a missing commit log to CHANGES
Posted by ji...@apache.org.
added a missing commit log to CHANGES
Project: http://git-wip-us.apache.org/repos/asf/tajo/repo
Commit: http://git-wip-us.apache.org/repos/asf/tajo/commit/5a97079c
Tree: http://git-wip-us.apache.org/repos/asf/tajo/tree/5a97079c
Diff: http://git-wip-us.apache.org/repos/asf/tajo/diff/5a97079c
Branch: refs/heads/index_support
Commit: 5a97079cef13803a636de58e3e0bbcee568bc3ca
Parents: 021a6f0
Author: Jihun Kang <ji...@apache.org>
Authored: Wed Dec 31 00:07:05 2014 +0900
Committer: Jihun Kang <ji...@apache.org>
Committed: Wed Dec 31 00:07:05 2014 +0900
----------------------------------------------------------------------
CHANGES | 3 +++
1 file changed, 3 insertions(+)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/tajo/blob/5a97079c/CHANGES
----------------------------------------------------------------------
diff --git a/CHANGES b/CHANGES
index 25d6e55..653cdfc 100644
--- a/CHANGES
+++ b/CHANGES
@@ -27,6 +27,9 @@ Release 0.9.1 - unreleased
IMPROVEMENT
+ TAJO-1176: Implements queryable virtual tables for catalog information
+ (jihun)
+
TAJO-269: Protocol buffer De/Serialization for LogicalNode. (hyunsik)
TAJO-1266: Too many logs when writing a parquet relation.
[4/8] tajo git commit: TAJO-269: Protocol buffer De/Serialization for
LogicalNode.
Posted by ji...@apache.org.
TAJO-269: Protocol buffer De/Serialization for LogicalNode.
Closes #322
Project: http://git-wip-us.apache.org/repos/asf/tajo/repo
Commit: http://git-wip-us.apache.org/repos/asf/tajo/commit/32be38d4
Tree: http://git-wip-us.apache.org/repos/asf/tajo/tree/32be38d4
Diff: http://git-wip-us.apache.org/repos/asf/tajo/diff/32be38d4
Branch: refs/heads/index_support
Commit: 32be38d41affc498b01286938f3fea89a8def1a9
Parents: 6fde9e5
Author: Hyunsik Choi <hy...@apache.org>
Authored: Tue Dec 30 21:52:53 2014 +0900
Committer: Hyunsik Choi <hy...@apache.org>
Committed: Tue Dec 30 21:53:29 2014 +0900
----------------------------------------------------------------------
CHANGES | 2 +
.../java/org/apache/tajo/catalog/Schema.java | 33 +-
.../java/org/apache/tajo/catalog/TableDesc.java | 2 +-
.../java/org/apache/tajo/conf/TajoConf.java | 20 +
.../java/org/apache/tajo/util/ProtoUtil.java | 19 +
.../org/apache/tajo/util/ReflectionUtil.java | 61 +-
.../main/java/org/apache/tajo/util/TUtil.java | 41 ++
.../engine/codegen/ExecutorPreCompiler.java | 8 +-
.../engine/planner/PhysicalPlannerImpl.java | 17 +-
.../tajo/engine/planner/enforce/Enforcer.java | 12 +-
.../engine/planner/global/GlobalPlanner.java | 36 +-
.../global/builder/DistinctGroupbyBuilder.java | 32 +-
.../BaseGlobalPlanRewriteRuleProvider.java | 39 +
.../rewriter/GlobalPlanRewriteEngine.java | 84 +++
.../global/rewriter/GlobalPlanRewriteRule.java | 49 ++
.../rewriter/GlobalPlanRewriteRuleProvider.java | 33 +
.../rewriter/GlobalPlanTestRuleProvider.java | 44 ++
.../rules/GlobalPlanEqualityTester.java | 63 ++
.../DistinctGroupbyFirstAggregationExec.java | 2 +-
.../DistinctGroupbyHashAggregationExec.java | 4 +-
.../DistinctGroupbySecondAggregationExec.java | 2 +-
.../DistinctGroupbySortAggregationExec.java | 4 +-
.../DistinctGroupbyThirdAggregationExec.java | 2 +-
.../apache/tajo/engine/query/TaskRequest.java | 3 +-
.../tajo/engine/query/TaskRequestImpl.java | 28 +-
.../utils/test/ErrorInjectionRewriter.java | 10 +-
.../tajo/master/DefaultTaskScheduler.java | 12 +-
.../org/apache/tajo/master/GlobalEngine.java | 1 +
.../apache/tajo/master/exec/QueryExecutor.java | 6 +-
.../master/querymaster/QueryMasterTask.java | 9 +-
.../main/java/org/apache/tajo/worker/Task.java | 4 +-
.../src/main/proto/TajoWorkerProtocol.proto | 14 +-
.../org/apache/tajo/TajoTestingCluster.java | 15 +-
.../apache/tajo/engine/eval/ExprTestBase.java | 14 +-
.../tajo/engine/query/TestGroupByQuery.java | 59 +-
.../tajo/engine/query/TestSelectQuery.java | 27 +-
.../tajo/engine/query/TestTruncateTable.java | 8 +-
.../tajo/engine/query/TestWindowQuery.java | 6 +-
.../apache/tajo/master/TestGlobalPlanner.java | 6 +-
.../org/apache/tajo/plan/LogicalOptimizer.java | 52 +-
.../tajo/plan/LogicalPlanPreprocessor.java | 4 +-
.../org/apache/tajo/plan/LogicalPlanner.java | 13 +-
.../main/java/org/apache/tajo/plan/Target.java | 13 +-
.../plan/expr/AggregationFunctionCallEval.java | 38 +-
.../org/apache/tajo/plan/expr/EvalNode.java | 10 +-
.../tajo/plan/expr/WindowFunctionEval.java | 12 +
.../tajo/plan/logical/AlterTableNode.java | 10 +
.../tajo/plan/logical/AlterTablespaceNode.java | 13 +-
.../apache/tajo/plan/logical/BinaryNode.java | 16 +
.../tajo/plan/logical/CreateDatabaseNode.java | 10 +
.../tajo/plan/logical/CreateTableNode.java | 11 +-
.../tajo/plan/logical/DistinctGroupbyNode.java | 39 +-
.../tajo/plan/logical/DropDatabaseNode.java | 17 +-
.../apache/tajo/plan/logical/DropTableNode.java | 10 +
.../apache/tajo/plan/logical/EvalExprNode.java | 12 +-
.../apache/tajo/plan/logical/GroupbyNode.java | 47 +-
.../apache/tajo/plan/logical/InsertNode.java | 15 +-
.../apache/tajo/plan/logical/LogicalNode.java | 16 +-
.../org/apache/tajo/plan/logical/NodeType.java | 6 +-
.../tajo/plan/logical/ProjectionNode.java | 12 +-
.../apache/tajo/plan/logical/RelationNode.java | 2 +-
.../org/apache/tajo/plan/logical/ScanNode.java | 13 +-
.../tajo/plan/logical/SetSessionNode.java | 22 +-
.../tajo/plan/logical/StoreTableNode.java | 11 +-
.../tajo/plan/logical/TableSubQueryNode.java | 12 +-
.../tajo/plan/logical/TruncateTableNode.java | 10 +
.../org/apache/tajo/plan/logical/UnaryNode.java | 14 +
.../apache/tajo/plan/logical/WindowSpec.java | 57 +-
.../tajo/plan/nameresolver/NameResolver.java | 6 +-
.../plan/nameresolver/ResolverByLegacy.java | 2 +-
.../rewrite/BaseLogicalPlanRewriteEngine.java | 89 +++
.../BaseLogicalPlanRewriteRuleProvider.java | 59 ++
.../plan/rewrite/BasicQueryRewriteEngine.java | 72 --
.../plan/rewrite/LogicalPlanRewriteEngine.java | 33 +
.../plan/rewrite/LogicalPlanRewriteRule.java | 57 ++
.../rewrite/LogicalPlanRewriteRuleProvider.java | 44 ++
.../rewrite/LogicalPlanTestRuleProvider.java | 44 ++
.../tajo/plan/rewrite/QueryRewriteEngine.java | 32 -
.../apache/tajo/plan/rewrite/RewriteRule.java | 56 --
.../plan/rewrite/rules/FilterPushDownRule.java | 9 +-
.../rules/LogicalPlanEqualityTester.java | 55 ++
.../rewrite/rules/PartitionedTableRewriter.java | 44 +-
.../rewrite/rules/ProjectionPushDownRule.java | 11 +-
.../tajo/plan/serder/EvalNodeDeserializer.java | 301 ++++++++
.../tajo/plan/serder/EvalNodeSerializer.java | 397 ++++++++++
.../plan/serder/EvalTreeProtoDeserializer.java | 218 ------
.../plan/serder/EvalTreeProtoSerializer.java | 310 --------
.../plan/serder/LogicalNodeDeserializer.java | 678 +++++++++++++++++
.../tajo/plan/serder/LogicalNodeSerializer.java | 724 +++++++++++++++++++
.../apache/tajo/plan/serder/package-info.java | 23 +
.../org/apache/tajo/plan/util/PlannerUtil.java | 16 +-
.../plan/visitor/BasicLogicalPlanVisitor.java | 39 +-
.../plan/visitor/ExplainLogicalPlanVisitor.java | 5 +-
.../tajo/plan/visitor/LogicalPlanVisitor.java | 7 +-
tajo-plan/src/main/proto/Plan.proto | 363 ++++++++--
.../org/apache/tajo/storage/StorageManager.java | 4 +-
.../storage/hbase/AddSortForInsertRewriter.java | 10 +-
.../tajo/storage/hbase/HBaseStorageManager.java | 6 +-
98 files changed, 3960 insertions(+), 1102 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/tajo/blob/32be38d4/CHANGES
----------------------------------------------------------------------
diff --git a/CHANGES b/CHANGES
index 013bb25..25d6e55 100644
--- a/CHANGES
+++ b/CHANGES
@@ -27,6 +27,8 @@ Release 0.9.1 - unreleased
IMPROVEMENT
+ TAJO-269: Protocol buffer De/Serialization for LogicalNode. (hyunsik)
+
TAJO-1266: Too many logs when writing a parquet relation.
(DaeMyung Kang via jihoon)
http://git-wip-us.apache.org/repos/asf/tajo/blob/32be38d4/tajo-catalog/tajo-catalog-common/src/main/java/org/apache/tajo/catalog/Schema.java
----------------------------------------------------------------------
diff --git a/tajo-catalog/tajo-catalog-common/src/main/java/org/apache/tajo/catalog/Schema.java b/tajo-catalog/tajo-catalog-common/src/main/java/org/apache/tajo/catalog/Schema.java
index 672b8e3..71c1b01 100644
--- a/tajo-catalog/tajo-catalog-common/src/main/java/org/apache/tajo/catalog/Schema.java
+++ b/tajo-catalog/tajo-catalog-common/src/main/java/org/apache/tajo/catalog/Schema.java
@@ -199,24 +199,21 @@ public class Schema implements ProtoObject<SchemaProto>, Cloneable, GsonObject {
}
public int getColumnId(String name) {
- String [] parts = name.split("\\.");
- if (parts.length == 2 || parts.length == 3) {
- if (fieldsByQualifiedName.containsKey(name)) {
- return fieldsByQualifiedName.get(name);
- } else {
- return -1;
- }
- } else {
- List<Integer> list = fieldsByName.get(name);
- if (list == null) {
- return -1;
- } else if (list.size() == 1) {
- return fieldsByName.get(name).get(0);
- } else if (list.size() == 0) {
- return -1;
- } else { // if list.size > 2
- throw throwAmbiguousFieldException(list);
- }
+ // if the same column exists, immediately return that column.
+ if (fieldsByQualifiedName.containsKey(name)) {
+ return fieldsByQualifiedName.get(name);
+ }
+
+ // The following is some workaround code.
+ List<Integer> list = fieldsByName.get(name);
+ if (list == null) {
+ return -1;
+ } else if (list.size() == 1) {
+ return fieldsByName.get(name).get(0);
+ } else if (list.size() == 0) {
+ return -1;
+ } else { // if list.size > 2
+ throw throwAmbiguousFieldException(list);
}
}
http://git-wip-us.apache.org/repos/asf/tajo/blob/32be38d4/tajo-catalog/tajo-catalog-common/src/main/java/org/apache/tajo/catalog/TableDesc.java
----------------------------------------------------------------------
diff --git a/tajo-catalog/tajo-catalog-common/src/main/java/org/apache/tajo/catalog/TableDesc.java b/tajo-catalog/tajo-catalog-common/src/main/java/org/apache/tajo/catalog/TableDesc.java
index ce167e1..ec679f9 100644
--- a/tajo-catalog/tajo-catalog-common/src/main/java/org/apache/tajo/catalog/TableDesc.java
+++ b/tajo-catalog/tajo-catalog-common/src/main/java/org/apache/tajo/catalog/TableDesc.java
@@ -168,7 +168,7 @@ public class TableDesc implements ProtoObject<TableDescProto>, GsonObject, Clone
boolean eq = tableName.equals(other.tableName);
eq = eq && schema.equals(other.schema);
eq = eq && meta.equals(other.meta);
- eq = eq && uri.equals(other.uri);
+ eq = eq && TUtil.checkEquals(uri, other.uri);
eq = eq && TUtil.checkEquals(partitionMethodDesc, other.partitionMethodDesc);
eq = eq && TUtil.checkEquals(external, other.external);
return eq && TUtil.checkEquals(stats, other.stats);
http://git-wip-us.apache.org/repos/asf/tajo/blob/32be38d4/tajo-common/src/main/java/org/apache/tajo/conf/TajoConf.java
----------------------------------------------------------------------
diff --git a/tajo-common/src/main/java/org/apache/tajo/conf/TajoConf.java b/tajo-common/src/main/java/org/apache/tajo/conf/TajoConf.java
index d0c6460..ab11ddd 100644
--- a/tajo-common/src/main/java/org/apache/tajo/conf/TajoConf.java
+++ b/tajo-common/src/main/java/org/apache/tajo/conf/TajoConf.java
@@ -249,6 +249,12 @@ public class TajoConf extends Configuration {
TASK_DEFAULT_SIZE("tajo.task.size-mb", 128),
// Query and Optimization -------------------------------------------------
+ // This class provides a ordered list of logical plan rewrite rule classes.
+ LOGICAL_PLAN_REWRITE_RULE_PROVIDER_CLASS("tajo.plan.logical.rewriter.provider",
+ "org.apache.tajo.plan.rewrite.BaseLogicalPlanRewriteRuleProvider"),
+ // This class provides a ordered list of global plan rewrite rule classes.
+ GLOBAL_PLAN_REWRITE_RULE_PROVIDER_CLASS("tajo.plan.global.rewriter.provider",
+ "org.apache.tajo.engine.planner.global.rewriter.BaseGlobalPlanRewriteRuleProvider"),
EXECUTOR_EXTERNAL_SORT_THREAD_NUM("tajo.executor.external-sort.thread-num", 1),
EXECUTOR_EXTERNAL_SORT_FANOUT("tajo.executor.external-sort.fanout-num", 8),
@@ -561,6 +567,20 @@ public class TajoConf extends Configuration {
setBoolVar(this, var, val);
}
+ public void setClassVar(ConfVars var, Class<?> clazz) {
+ setVar(var, clazz.getCanonicalName());
+ }
+
+ public Class<?> getClassVar(ConfVars var) {
+ String valueString = getVar(var);
+
+ try {
+ return getClassByName(valueString);
+ } catch (ClassNotFoundException e) {
+ throw new RuntimeException(e);
+ }
+ }
+
public static String getVar(Configuration conf, ConfVars var) {
return conf.get(var.varname, var.defaultVal);
}
http://git-wip-us.apache.org/repos/asf/tajo/blob/32be38d4/tajo-common/src/main/java/org/apache/tajo/util/ProtoUtil.java
----------------------------------------------------------------------
diff --git a/tajo-common/src/main/java/org/apache/tajo/util/ProtoUtil.java b/tajo-common/src/main/java/org/apache/tajo/util/ProtoUtil.java
index dbc987d..f9d759b 100644
--- a/tajo-common/src/main/java/org/apache/tajo/util/ProtoUtil.java
+++ b/tajo-common/src/main/java/org/apache/tajo/util/ProtoUtil.java
@@ -18,7 +18,11 @@
package org.apache.tajo.util;
+import com.google.common.collect.Lists;
+import org.apache.tajo.common.ProtoObject;
+
import java.util.Collection;
+import java.util.List;
import java.util.Map;
import static org.apache.tajo.rpc.protocolrecords.PrimitiveProtos.*;
@@ -52,4 +56,19 @@ public class ProtoUtil {
public static KeyValueSetProto convertFromMap(Map<String, String> map) {
return new KeyValueSet(map).getProto();
}
+
+ /**
+ * It converts an array of ProtoObjects into Iteratable one.
+ *
+ * @param protoObjects
+ * @param <T>
+ * @return
+ */
+ public static <T> Iterable<T> toProtoObjects(ProtoObject[] protoObjects) {
+ List<T> converted = Lists.newArrayList();
+ for (int i = 0; i < protoObjects.length; i++) {
+ converted.add((T) protoObjects[i].getProto());
+ }
+ return converted;
+ }
}
http://git-wip-us.apache.org/repos/asf/tajo/blob/32be38d4/tajo-common/src/main/java/org/apache/tajo/util/ReflectionUtil.java
----------------------------------------------------------------------
diff --git a/tajo-common/src/main/java/org/apache/tajo/util/ReflectionUtil.java b/tajo-common/src/main/java/org/apache/tajo/util/ReflectionUtil.java
index eccc61f..e2def69 100644
--- a/tajo-common/src/main/java/org/apache/tajo/util/ReflectionUtil.java
+++ b/tajo-common/src/main/java/org/apache/tajo/util/ReflectionUtil.java
@@ -18,22 +18,71 @@
package org.apache.tajo.util;
+import org.apache.tajo.conf.TajoConf;
+
import java.lang.reflect.Constructor;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
public class ReflectionUtil {
- private static final Class<?>[] EMPTY_ARRAY = new Class[]{};
+ private static final Class<?>[] EMPTY_PARAM = new Class[]{};
+ private static final Object [] EMPTY_OBJECT = new Object[] {};
+ private static final Class<?>[] CONF_PARAM = new Class[]{TajoConf.class};
/**
- * Cache of constructors for each class. Pins the classes so they
+ * Caches of constructors for each class. Pins the classes so they
* can't be garbage collected until ReflectionUtils can be collected.
+ *
+ * EMPTY_CONSTRUCTOR_CACHE keeps classes which don't have any parameterized constructor, and
+ * CONF_CONSTRUCTOR_CACHE keeps classes which have one constructor to take TajoConf.
*/
- private static final Map<Class<?>, Constructor<?>> CONSTRUCTOR_CACHE =
+ private static final Map<Class<?>, Constructor<?>> EMPTY_CONSTRUCTOR_CACHE =
+ new ConcurrentHashMap<Class<?>, Constructor<?>>();
+ private static final Map<Class<?>, Constructor<?>> CONF_CONSTRUCTOR_CACHE =
new ConcurrentHashMap<Class<?>, Constructor<?>>();
- public static Object newInstance(Class<?> clazz)
- throws InstantiationException, IllegalAccessException {
- return clazz.newInstance();
+ /**
+ * Initialize an instance by a given class
+ *
+ * @param clazz Class to be initialized
+ * @return initialized object
+ */
+ public static <T> T newInstance(Class<? extends T> clazz) {
+ try {
+ Constructor<?> constructor;
+ if (EMPTY_CONSTRUCTOR_CACHE.containsKey(clazz)) {
+ constructor = EMPTY_CONSTRUCTOR_CACHE.get(clazz);
+ } else {
+ constructor = clazz.getConstructor(EMPTY_PARAM);
+ EMPTY_CONSTRUCTOR_CACHE.put(clazz, constructor);
+ }
+
+ return (T) constructor.newInstance(EMPTY_OBJECT);
+ } catch (Throwable t) {
+ throw new RuntimeException(t);
+ }
}
+
+ /**
+ * Initialize an instance by a given class with TajoConf parameter
+ *
+ * @param clazz Class to be initialized
+ * @param conf TajoConf instance
+ * @return initialized object
+ */
+ public static <T> T newInstance(Class<? extends T> clazz, TajoConf conf) {
+ try {
+ Constructor<?> constructor;
+ if (CONF_CONSTRUCTOR_CACHE.containsKey(clazz)) {
+ constructor = CONF_CONSTRUCTOR_CACHE.get(clazz);
+ } else {
+ constructor = clazz.getConstructor(CONF_PARAM);
+ CONF_CONSTRUCTOR_CACHE.put(clazz, constructor);
+ }
+
+ return (T) constructor.newInstance(new Object[]{conf});
+ } catch (Throwable t) {
+ throw new RuntimeException(t);
+ }
+ }
}
http://git-wip-us.apache.org/repos/asf/tajo/blob/32be38d4/tajo-common/src/main/java/org/apache/tajo/util/TUtil.java
----------------------------------------------------------------------
diff --git a/tajo-common/src/main/java/org/apache/tajo/util/TUtil.java b/tajo-common/src/main/java/org/apache/tajo/util/TUtil.java
index 0ceb2b2..a1de860 100644
--- a/tajo-common/src/main/java/org/apache/tajo/util/TUtil.java
+++ b/tajo-common/src/main/java/org/apache/tajo/util/TUtil.java
@@ -42,6 +42,37 @@ public class TUtil {
}
/**
+ * check two collections as equals. It also check the equivalence of null.
+ * It will return true even if they are all null.
+ *
+ * @param s1 the first collection to be compared.
+ * @param s2 the second collection to be compared
+ * @return true if they are equal or all null
+ */
+ public static boolean checkEquals(Collection<?> s1, Collection<?> s2) {
+ if (s1 == null ^ s2 == null) {
+ return false;
+ } else if (s1 == null && s2 == null) {
+ return true;
+ } else {
+ if (s1.size() == 0 && s2.size() == 0) {
+ return true;
+ } else if (s1.size() == s2.size()) {
+ Iterator<?> it1 = s1.iterator();
+ Iterator<?> it2 = s2.iterator();
+ Object o1;
+ Object o2;
+ for (o1 = it1.next(), o2 = it2.next(); it1.hasNext() && it2.hasNext(); o1 = it1.next(), o2 = it2.next()) {
+ if (!o1.equals(o2)) {
+ return false;
+ }
+ }
+ }
+ return true;
+ }
+ }
+
+ /**
* check two arrays as equals. It also check the equivalence of null.
* It will return true even if they are all null.
*
@@ -59,6 +90,16 @@ public class TUtil {
}
}
+ public static boolean checkEquals(int [] s1, int [] s2) {
+ if (s1 == null ^ s2 == null) {
+ return false;
+ } else if (s1 == null && s2 == null) {
+ return true;
+ } else {
+ return Arrays.equals(s1, s2);
+ }
+ }
+
public static <T> T[] concat(T[] first, T[] second) {
T[] result = Arrays.copyOf(first, first.length + second.length);
System.arraycopy(second, 0, result, first.length, second.length);
http://git-wip-us.apache.org/repos/asf/tajo/blob/32be38d4/tajo-core/src/main/java/org/apache/tajo/engine/codegen/ExecutorPreCompiler.java
----------------------------------------------------------------------
diff --git a/tajo-core/src/main/java/org/apache/tajo/engine/codegen/ExecutorPreCompiler.java b/tajo-core/src/main/java/org/apache/tajo/engine/codegen/ExecutorPreCompiler.java
index d588e7f..79513dc 100644
--- a/tajo-core/src/main/java/org/apache/tajo/engine/codegen/ExecutorPreCompiler.java
+++ b/tajo-core/src/main/java/org/apache/tajo/engine/codegen/ExecutorPreCompiler.java
@@ -149,9 +149,9 @@ public class ExecutorPreCompiler extends BasicLogicalPlanVisitor<ExecutorPreComp
return node;
}
- public LogicalNode visitDistinct(CompilationContext context, LogicalPlan plan, LogicalPlan.QueryBlock block,
- DistinctGroupbyNode node, Stack<LogicalNode> stack) throws PlanningException {
- super.visitDistinct(context, plan, block, node, stack);
+ public LogicalNode visitDistinctGroupby(CompilationContext context, LogicalPlan plan, LogicalPlan.QueryBlock block,
+ DistinctGroupbyNode node, Stack<LogicalNode> stack) throws PlanningException {
+ super.visitDistinctGroupby(context, plan, block, node, stack);
compileProjectableNode(context, node.getInSchema(), node);
return node;
@@ -190,7 +190,7 @@ public class ExecutorPreCompiler extends BasicLogicalPlanVisitor<ExecutorPreComp
if (node.hasTargets()) {
for (Target target : node.getTargets()) {
- compileIfAbsent(context, node.getTableSchema(), target.getEvalTree());
+ compileIfAbsent(context, node.getLogicalSchema(), target.getEvalTree());
}
}
http://git-wip-us.apache.org/repos/asf/tajo/blob/32be38d4/tajo-core/src/main/java/org/apache/tajo/engine/planner/PhysicalPlannerImpl.java
----------------------------------------------------------------------
diff --git a/tajo-core/src/main/java/org/apache/tajo/engine/planner/PhysicalPlannerImpl.java b/tajo-core/src/main/java/org/apache/tajo/engine/planner/PhysicalPlannerImpl.java
index 2a34637..d043a27 100644
--- a/tajo-core/src/main/java/org/apache/tajo/engine/planner/PhysicalPlannerImpl.java
+++ b/tajo-core/src/main/java/org/apache/tajo/engine/planner/PhysicalPlannerImpl.java
@@ -34,6 +34,7 @@ import org.apache.tajo.catalog.SortSpec;
import org.apache.tajo.catalog.proto.CatalogProtos;
import org.apache.tajo.catalog.proto.CatalogProtos.SortSpecProto;
import org.apache.tajo.conf.TajoConf;
+import org.apache.tajo.plan.serder.LogicalNodeDeserializer;
import org.apache.tajo.engine.planner.enforce.Enforcer;
import org.apache.tajo.engine.planner.global.DataChannel;
import org.apache.tajo.engine.planner.physical.*;
@@ -877,7 +878,7 @@ public class PhysicalPlannerImpl implements PhysicalPlanner {
TajoWorkerProtocol.SortedInputEnforce sortEnforcer = property.get(0).getSortedInput();
boolean condition = scanNode.getTableName().equals(sortEnforcer.getTableName());
- SortSpec [] sortSpecs = PlannerUtil.convertSortSpecs(sortEnforcer.getSortSpecsList());
+ SortSpec [] sortSpecs = LogicalNodeDeserializer.convertSortSpecs(sortEnforcer.getSortSpecsList());
return condition && TUtil.checkEquals(sortNode.getSortKeys(), sortSpecs);
} else {
return false;
@@ -1089,7 +1090,7 @@ public class PhysicalPlannerImpl implements PhysicalPlanner {
if (phase == 3) {
sortSpecs.add(new SortSpec(distinctNode.getTargets()[0].getNamedColumn()));
}
- for (GroupbyNode eachGroupbyNode: distinctNode.getGroupByNodes()) {
+ for (GroupbyNode eachGroupbyNode: distinctNode.getSubPlans()) {
for (Column eachColumn: eachGroupbyNode.getGroupingColumns()) {
sortSpecs.add(new SortSpec(eachColumn));
}
@@ -1110,7 +1111,7 @@ public class PhysicalPlannerImpl implements PhysicalPlanner {
private PhysicalExec createSortAggregationDistinctGroupbyExec(TaskAttemptContext ctx,
DistinctGroupbyNode distinctGroupbyNode, PhysicalExec subOp,
DistinctGroupbyEnforcer enforcer) throws IOException {
- List<GroupbyNode> groupbyNodes = distinctGroupbyNode.getGroupByNodes();
+ List<GroupbyNode> groupbyNodes = distinctGroupbyNode.getSubPlans();
SortAggregateExec[] sortAggregateExec = new SortAggregateExec[groupbyNodes.size()];
@@ -1216,15 +1217,15 @@ public class PhysicalPlannerImpl implements PhysicalPlanner {
List<EnforceProperty> properties = enforcer.getEnforceProperties(type);
EnforceProperty found = null;
for (EnforceProperty property : properties) {
- if (type == EnforceType.JOIN && property.getJoin().getPid() == node.getPID()) {
+ if (type == EnforceType.JOIN && property.getJoin().getNodeId() == node.getPID()) {
found = property;
- } else if (type == EnforceType.GROUP_BY && property.getGroupby().getPid() == node.getPID()) {
+ } else if (type == EnforceType.GROUP_BY && property.getGroupby().getNodeId() == node.getPID()) {
found = property;
- } else if (type == EnforceType.DISTINCT_GROUP_BY && property.getDistinct().getPid() == node.getPID()) {
+ } else if (type == EnforceType.DISTINCT_GROUP_BY && property.getDistinct().getNodeId() == node.getPID()) {
found = property;
- } else if (type == EnforceType.SORT && property.getSort().getPid() == node.getPID()) {
+ } else if (type == EnforceType.SORT && property.getSort().getNodeId() == node.getPID()) {
found = property;
- } else if (type == EnforceType.COLUMN_PARTITION && property.getColumnPartition().getPid() == node.getPID()) {
+ } else if (type == EnforceType.COLUMN_PARTITION && property.getColumnPartition().getNodeId() == node.getPID()) {
found = property;
}
}
http://git-wip-us.apache.org/repos/asf/tajo/blob/32be38d4/tajo-core/src/main/java/org/apache/tajo/engine/planner/enforce/Enforcer.java
----------------------------------------------------------------------
diff --git a/tajo-core/src/main/java/org/apache/tajo/engine/planner/enforce/Enforcer.java b/tajo-core/src/main/java/org/apache/tajo/engine/planner/enforce/Enforcer.java
index e2d7744..8128390 100644
--- a/tajo-core/src/main/java/org/apache/tajo/engine/planner/enforce/Enforcer.java
+++ b/tajo-core/src/main/java/org/apache/tajo/engine/planner/enforce/Enforcer.java
@@ -98,7 +98,7 @@ public class Enforcer implements ProtoObject<EnforcerProto> {
public void enforceJoinAlgorithm(int pid, JoinEnforce.JoinAlgorithm algorithm) {
EnforceProperty.Builder builder = newProperty();
JoinEnforce.Builder enforce = JoinEnforce.newBuilder();
- enforce.setPid(pid);
+ enforce.setNodeId(pid);
enforce.setAlgorithm(algorithm);
builder.setType(EnforceType.JOIN);
@@ -109,7 +109,7 @@ public class Enforcer implements ProtoObject<EnforcerProto> {
public void enforceSortAggregation(int pid, @Nullable SortSpec[] sortSpecs) {
EnforceProperty.Builder builder = newProperty();
GroupbyEnforce.Builder enforce = GroupbyEnforce.newBuilder();
- enforce.setPid(pid);
+ enforce.setNodeId(pid);
enforce.setAlgorithm(GroupbyAlgorithm.SORT_AGGREGATION);
if (sortSpecs != null) {
for (SortSpec sortSpec : sortSpecs) {
@@ -125,7 +125,7 @@ public class Enforcer implements ProtoObject<EnforcerProto> {
public void enforceHashAggregation(int pid) {
EnforceProperty.Builder builder = newProperty();
GroupbyEnforce.Builder enforce = GroupbyEnforce.newBuilder();
- enforce.setPid(pid);
+ enforce.setNodeId(pid);
enforce.setAlgorithm(GroupbyAlgorithm.HASH_AGGREGATION);
builder.setType(EnforceType.GROUP_BY);
@@ -146,7 +146,7 @@ public class Enforcer implements ProtoObject<EnforcerProto> {
List<SortSpecArray> sortSpecArrays) {
EnforceProperty.Builder builder = newProperty();
DistinctGroupbyEnforcer.Builder enforce = DistinctGroupbyEnforcer.newBuilder();
- enforce.setPid(pid);
+ enforce.setNodeId(pid);
enforce.setIsMultipleAggregation(isMultipleAggregation);
enforce.setAlgorithm(algorithm);
if (sortSpecArrays != null) {
@@ -164,7 +164,7 @@ public class Enforcer implements ProtoObject<EnforcerProto> {
public void enforceSortAlgorithm(int pid, SortEnforce.SortAlgorithm algorithm) {
EnforceProperty.Builder builder = newProperty();
SortEnforce.Builder enforce = SortEnforce.newBuilder();
- enforce.setPid(pid);
+ enforce.setNodeId(pid);
enforce.setAlgorithm(algorithm);
builder.setType(EnforceType.SORT);
@@ -203,7 +203,7 @@ public class Enforcer implements ProtoObject<EnforcerProto> {
public void enforceColumnPartitionAlgorithm(int pid, ColumnPartitionAlgorithm algorithm) {
EnforceProperty.Builder builder = newProperty();
ColumnPartitionEnforcer.Builder enforce = ColumnPartitionEnforcer.newBuilder();
- enforce.setPid(pid);
+ enforce.setNodeId(pid);
enforce.setAlgorithm(algorithm);
builder.setType(EnforceType.COLUMN_PARTITION);
http://git-wip-us.apache.org/repos/asf/tajo/blob/32be38d4/tajo-core/src/main/java/org/apache/tajo/engine/planner/global/GlobalPlanner.java
----------------------------------------------------------------------
diff --git a/tajo-core/src/main/java/org/apache/tajo/engine/planner/global/GlobalPlanner.java b/tajo-core/src/main/java/org/apache/tajo/engine/planner/global/GlobalPlanner.java
index c75b348..6c3e3b8 100644
--- a/tajo-core/src/main/java/org/apache/tajo/engine/planner/global/GlobalPlanner.java
+++ b/tajo-core/src/main/java/org/apache/tajo/engine/planner/global/GlobalPlanner.java
@@ -18,6 +18,7 @@
package org.apache.tajo.engine.planner.global;
+import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
@@ -36,17 +37,20 @@ import org.apache.tajo.conf.TajoConf;
import org.apache.tajo.engine.planner.BroadcastJoinMarkCandidateVisitor;
import org.apache.tajo.engine.planner.BroadcastJoinPlanVisitor;
import org.apache.tajo.engine.planner.global.builder.DistinctGroupbyBuilder;
+import org.apache.tajo.engine.planner.global.rewriter.GlobalPlanRewriteEngine;
+import org.apache.tajo.engine.planner.global.rewriter.GlobalPlanRewriteRuleProvider;
import org.apache.tajo.exception.InternalException;
import org.apache.tajo.plan.LogicalPlan;
-import org.apache.tajo.plan.util.PlannerUtil;
import org.apache.tajo.plan.PlanningException;
import org.apache.tajo.plan.Target;
import org.apache.tajo.plan.expr.*;
import org.apache.tajo.plan.function.AggFunction;
import org.apache.tajo.plan.logical.*;
import org.apache.tajo.plan.rewrite.rules.ProjectionPushDownRule;
+import org.apache.tajo.plan.util.PlannerUtil;
import org.apache.tajo.plan.visitor.BasicLogicalPlanVisitor;
import org.apache.tajo.util.KeyValueSet;
+import org.apache.tajo.util.ReflectionUtil;
import org.apache.tajo.util.TUtil;
import org.apache.tajo.worker.TajoWorker;
@@ -54,6 +58,7 @@ import java.io.IOException;
import java.util.*;
import static org.apache.tajo.conf.TajoConf.ConfVars;
+import static org.apache.tajo.conf.TajoConf.ConfVars.GLOBAL_PLAN_REWRITE_RULE_PROVIDER_CLASS;
import static org.apache.tajo.plan.serder.PlanProto.ShuffleType.*;
/**
@@ -64,34 +69,29 @@ public class GlobalPlanner {
private final TajoConf conf;
private final CatalogProtos.StoreType storeType;
- private CatalogService catalog;
- private TajoWorker.WorkerContext workerContext;
+ private final CatalogService catalog;
+ private final GlobalPlanRewriteEngine rewriteEngine;
+ @VisibleForTesting
public GlobalPlanner(final TajoConf conf, final CatalogService catalog) throws IOException {
this.conf = conf;
this.catalog = catalog;
this.storeType = CatalogProtos.StoreType.valueOf(conf.getVar(ConfVars.SHUFFLE_FILE_FORMAT).toUpperCase());
Preconditions.checkArgument(storeType != null);
+
+ Class<? extends GlobalPlanRewriteRuleProvider> clazz =
+ (Class<? extends GlobalPlanRewriteRuleProvider>) conf.getClassVar(GLOBAL_PLAN_REWRITE_RULE_PROVIDER_CLASS);
+ GlobalPlanRewriteRuleProvider provider = ReflectionUtil.newInstance(clazz, conf);
+ rewriteEngine = new GlobalPlanRewriteEngine();
+ rewriteEngine.addRewriteRule(provider.getRules());
}
public GlobalPlanner(final TajoConf conf, final TajoWorker.WorkerContext workerContext) throws IOException {
- this.conf = conf;
- this.workerContext = workerContext;
- this.storeType = CatalogProtos.StoreType.valueOf(conf.getVar(ConfVars.SHUFFLE_FILE_FORMAT).toUpperCase());
- Preconditions.checkArgument(storeType != null);
+ this(conf, workerContext.getCatalog());
}
- /**
- * TODO: this is hack. it must be refactored at TAJO-602.
- */
public CatalogService getCatalog() {
- if (workerContext.getCatalog() != null) {
- return workerContext.getCatalog();
- } else if (catalog != null) {
- return catalog;
- } else {
- throw new IllegalStateException("No Catalog Instance");
- }
+ return catalog;
}
public CatalogProtos.StoreType getStoreType() {
@@ -163,6 +163,8 @@ public class GlobalPlanner {
masterPlan.setTerminal(terminalBlock);
LOG.info("\n" + masterPlan.toString());
+
+ masterPlan = rewriteEngine.rewrite(masterPlan);
}
private static void setFinalOutputChannel(DataChannel outputChannel, Schema outputSchema) {
http://git-wip-us.apache.org/repos/asf/tajo/blob/32be38d4/tajo-core/src/main/java/org/apache/tajo/engine/planner/global/builder/DistinctGroupbyBuilder.java
----------------------------------------------------------------------
diff --git a/tajo-core/src/main/java/org/apache/tajo/engine/planner/global/builder/DistinctGroupbyBuilder.java b/tajo-core/src/main/java/org/apache/tajo/engine/planner/global/builder/DistinctGroupbyBuilder.java
index 671bb19..5c6e80e 100644
--- a/tajo-core/src/main/java/org/apache/tajo/engine/planner/global/builder/DistinctGroupbyBuilder.java
+++ b/tajo-core/src/main/java/org/apache/tajo/engine/planner/global/builder/DistinctGroupbyBuilder.java
@@ -99,7 +99,7 @@ public class DistinctGroupbyBuilder {
DistinctGroupbyNode thirdStageDistinctNode = PlannerUtil.clone(plan, baseDistinctNode);
// Set second, third non-distinct aggregation's eval node to field eval
- GroupbyNode lastGroupbyNode = secondStageDistinctNode.getGroupByNodes().get(secondStageDistinctNode.getGroupByNodes().size() - 1);
+ GroupbyNode lastGroupbyNode = secondStageDistinctNode.getSubPlans().get(secondStageDistinctNode.getSubPlans().size() - 1);
if (!lastGroupbyNode.isDistinct()) {
int index = 0;
for (AggregationFunctionCallEval aggrFunction: lastGroupbyNode.getAggFunctions()) {
@@ -108,7 +108,7 @@ public class DistinctGroupbyBuilder {
index++;
}
}
- lastGroupbyNode = thirdStageDistinctNode.getGroupByNodes().get(thirdStageDistinctNode.getGroupByNodes().size() - 1);
+ lastGroupbyNode = thirdStageDistinctNode.getSubPlans().get(thirdStageDistinctNode.getSubPlans().size() - 1);
if (!lastGroupbyNode.isDistinct()) {
int index = 0;
for (AggregationFunctionCallEval aggrFunction: lastGroupbyNode.getAggFunctions()) {
@@ -300,11 +300,11 @@ public class DistinctGroupbyBuilder {
DistinctGroupbyNode baseDistinctNode = new DistinctGroupbyNode(context.getPlan().getLogicalPlan().newPID());
baseDistinctNode.setTargets(baseGroupByTargets.toArray(new Target[]{}));
- baseDistinctNode.setGroupColumns(groupbyNode.getGroupingColumns());
+ baseDistinctNode.setGroupingColumns(groupbyNode.getGroupingColumns());
baseDistinctNode.setInSchema(groupbyNode.getInSchema());
baseDistinctNode.setChild(groupbyNode.getChild());
- baseDistinctNode.setGroupbyNodes(childGroupbyNodes);
+ baseDistinctNode.setSubPlans(childGroupbyNodes);
return baseDistinctNode;
}
@@ -468,11 +468,11 @@ public class DistinctGroupbyBuilder {
DistinctGroupbyNode baseDistinctNode = new DistinctGroupbyNode(context.getPlan().getLogicalPlan().newPID());
baseDistinctNode.setTargets(groupbyNode.getTargets());
- baseDistinctNode.setGroupColumns(groupbyNode.getGroupingColumns());
+ baseDistinctNode.setGroupingColumns(groupbyNode.getGroupingColumns());
baseDistinctNode.setInSchema(groupbyNode.getInSchema());
baseDistinctNode.setChild(groupbyNode.getChild());
- baseDistinctNode.setGroupbyNodes(childGroupbyNodes);
+ baseDistinctNode.setSubPlans(childGroupbyNodes);
return baseDistinctNode;
}
@@ -529,12 +529,12 @@ public class DistinctGroupbyBuilder {
// - Change SecondStage's aggregation expr and target column name. For example:
// exprs: (sum(default.lineitem.l_quantity (FLOAT8))) ==> exprs: (sum(?sum_3 (FLOAT8)))
int grpIdx = 0;
- for (GroupbyNode firstStageGroupbyNode: firstStageDistinctNode.getGroupByNodes()) {
- GroupbyNode secondStageGroupbyNode = secondStageDistinctNode.getGroupByNodes().get(grpIdx);
+ for (GroupbyNode firstStageGroupbyNode: firstStageDistinctNode.getSubPlans()) {
+ GroupbyNode secondStageGroupbyNode = secondStageDistinctNode.getSubPlans().get(grpIdx);
if (firstStageGroupbyNode.isDistinct()) {
// FirstStage: Remove aggregation, Set target with only grouping columns
- firstStageGroupbyNode.setAggFunctions(null);
+ firstStageGroupbyNode.setAggFunctions(PlannerUtil.EMPTY_AGG_FUNCS);
List<Target> firstGroupbyTargets = new ArrayList<Target>();
for (Column column : firstStageGroupbyNode.getGroupingColumns()) {
@@ -614,7 +614,7 @@ public class DistinctGroupbyBuilder {
// In the case of distinct query without group by clause
// other aggregation function is added to last distinct group by node.
- List<GroupbyNode> secondStageGroupbyNodes = secondStageDistinctNode.getGroupByNodes();
+ List<GroupbyNode> secondStageGroupbyNodes = secondStageDistinctNode.getSubPlans();
GroupbyNode lastSecondStageGroupbyNode = secondStageGroupbyNodes.get(secondStageGroupbyNodes.size() - 1);
if (!lastSecondStageGroupbyNode.isDistinct() && lastSecondStageGroupbyNode.isEmptyGrouping()) {
GroupbyNode otherGroupbyNode = lastSecondStageGroupbyNode;
@@ -644,7 +644,7 @@ public class DistinctGroupbyBuilder {
List<Integer> firstStageColumnIds = new ArrayList<Integer>();
columnIdIndex = 0;
List<Target> firstTargets = new ArrayList<Target>();
- for (GroupbyNode firstStageGroupbyNode: firstStageDistinctNode.getGroupByNodes()) {
+ for (GroupbyNode firstStageGroupbyNode: firstStageDistinctNode.getSubPlans()) {
if (firstStageGroupbyNode.isDistinct()) {
for (Column column : firstStageGroupbyNode.getGroupingColumns()) {
Target firstTarget = new Target(new FieldEval(column));
@@ -674,7 +674,7 @@ public class DistinctGroupbyBuilder {
Schema secondStageInSchema = new Schema();
//TODO merged tuple schema
int index = 0;
- for(GroupbyNode eachNode: secondStageDistinctNode.getGroupByNodes()) {
+ for(GroupbyNode eachNode: secondStageDistinctNode.getSubPlans()) {
eachNode.setInSchema(firstStageDistinctNode.getOutSchema());
for (Column column: eachNode.getOutSchema().getColumns()) {
if (secondStageInSchema.getColumn(column) == null) {
@@ -695,13 +695,13 @@ public class DistinctGroupbyBuilder {
List<SortSpecArray> sortSpecArrays = new ArrayList<SortSpecArray>();
int index = 0;
- for (GroupbyNode groupbyNode: firstStageDistinctNode.getGroupByNodes()) {
+ for (GroupbyNode groupbyNode: firstStageDistinctNode.getSubPlans()) {
List<SortSpecProto> sortSpecs = new ArrayList<SortSpecProto>();
for (Column column: groupbyNode.getGroupingColumns()) {
sortSpecs.add(SortSpecProto.newBuilder().setColumn(column.getProto()).build());
}
sortSpecArrays.add( SortSpecArray.newBuilder()
- .setPid(secondStageDistinctNode.getGroupByNodes().get(index).getPID())
+ .setNodeId(secondStageDistinctNode.getSubPlans().get(index).getPID())
.addAllSortSpecs(sortSpecs).build());
}
secondStageBlock.getEnforcer().enforceDistinctAggregation(secondStageDistinctNode.getPID(),
@@ -723,13 +723,13 @@ public class DistinctGroupbyBuilder {
List<SortSpecArray> sortSpecArrays = new ArrayList<SortSpecArray>();
int index = 0;
- for (GroupbyNode groupbyNode: firstStageDistinctNode.getGroupByNodes()) {
+ for (GroupbyNode groupbyNode: firstStageDistinctNode.getSubPlans()) {
List<SortSpecProto> sortSpecs = new ArrayList<SortSpecProto>();
for (Column column: groupbyNode.getGroupingColumns()) {
sortSpecs.add(SortSpecProto.newBuilder().setColumn(column.getProto()).build());
}
sortSpecArrays.add( SortSpecArray.newBuilder()
- .setPid(thirdStageDistinctNode.getGroupByNodes().get(index).getPID())
+ .setNodeId(thirdStageDistinctNode.getSubPlans().get(index).getPID())
.addAllSortSpecs(sortSpecs).build());
}
thirdStageBlock.getEnforcer().enforceDistinctAggregation(thirdStageDistinctNode.getPID(),
http://git-wip-us.apache.org/repos/asf/tajo/blob/32be38d4/tajo-core/src/main/java/org/apache/tajo/engine/planner/global/rewriter/BaseGlobalPlanRewriteRuleProvider.java
----------------------------------------------------------------------
diff --git a/tajo-core/src/main/java/org/apache/tajo/engine/planner/global/rewriter/BaseGlobalPlanRewriteRuleProvider.java b/tajo-core/src/main/java/org/apache/tajo/engine/planner/global/rewriter/BaseGlobalPlanRewriteRuleProvider.java
new file mode 100644
index 0000000..96ee2c6
--- /dev/null
+++ b/tajo-core/src/main/java/org/apache/tajo/engine/planner/global/rewriter/BaseGlobalPlanRewriteRuleProvider.java
@@ -0,0 +1,39 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.tajo.engine.planner.global.rewriter;
+
+import org.apache.tajo.conf.TajoConf;
+import org.apache.tajo.util.TUtil;
+
+import java.util.Collection;
+import java.util.List;
+
+@SuppressWarnings("unused")
+public class BaseGlobalPlanRewriteRuleProvider extends GlobalPlanRewriteRuleProvider {
+ private static final List<Class<? extends GlobalPlanRewriteRule>> EMPTY_RULES = TUtil.newList();
+
+ public BaseGlobalPlanRewriteRuleProvider(TajoConf conf) {
+ super(conf);
+ }
+
+ @Override
+ public Collection<Class<? extends GlobalPlanRewriteRule>> getRules() {
+ return EMPTY_RULES;
+ }
+}
http://git-wip-us.apache.org/repos/asf/tajo/blob/32be38d4/tajo-core/src/main/java/org/apache/tajo/engine/planner/global/rewriter/GlobalPlanRewriteEngine.java
----------------------------------------------------------------------
diff --git a/tajo-core/src/main/java/org/apache/tajo/engine/planner/global/rewriter/GlobalPlanRewriteEngine.java b/tajo-core/src/main/java/org/apache/tajo/engine/planner/global/rewriter/GlobalPlanRewriteEngine.java
new file mode 100644
index 0000000..c01ed0e
--- /dev/null
+++ b/tajo-core/src/main/java/org/apache/tajo/engine/planner/global/rewriter/GlobalPlanRewriteEngine.java
@@ -0,0 +1,84 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.tajo.engine.planner.global.rewriter;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.tajo.engine.planner.global.MasterPlan;
+import org.apache.tajo.plan.PlanningException;
+import org.apache.tajo.util.ReflectionUtil;
+
+import java.util.LinkedHashMap;
+import java.util.Map;
+
+public class GlobalPlanRewriteEngine {
+ /** class logger */
+ private static final Log LOG = LogFactory.getLog(GlobalPlanRewriteEngine.class);
+
+ /** a map for query rewrite rules */
+ private final Map<String, GlobalPlanRewriteRule> rewriteRules = new LinkedHashMap<String, GlobalPlanRewriteRule>();
+
+ /**
+ * Add a query rewrite rule to this engine.
+ *
+ * @param rules Rule classes
+ */
+ public void addRewriteRule(Iterable<Class<? extends GlobalPlanRewriteRule>> rules) {
+ for (Class<? extends GlobalPlanRewriteRule> clazz : rules) {
+ try {
+ GlobalPlanRewriteRule rule = ReflectionUtil.newInstance(clazz);
+ addRewriteRule(rule);
+ } catch (Throwable t) {
+ throw new RuntimeException(t);
+ }
+ }
+ }
+
+ /**
+ * Add a query rewrite rule to this engine.
+ *
+ * @param rule The rule to be added to this engine.
+ */
+ public void addRewriteRule(GlobalPlanRewriteRule rule) {
+ if (!rewriteRules.containsKey(rule.getName())) {
+ rewriteRules.put(rule.getName(), rule);
+ }
+ }
+
+ /**
+ * Rewrite a global plan with all query rewrite rules added to this engine.
+ *
+ * @param plan The plan to be rewritten with all query rewrite rule.
+ * @return The rewritten plan.
+ */
+ public MasterPlan rewrite(MasterPlan plan) throws PlanningException {
+ GlobalPlanRewriteRule rule;
+ for (Map.Entry<String, GlobalPlanRewriteRule> rewriteRule : rewriteRules.entrySet()) {
+ rule = rewriteRule.getValue();
+ if (rule.isEligible(plan)) {
+ plan = rule.rewrite(plan);
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("The rule \"" + rule.getName() + " \" rewrites the query.");
+ }
+ }
+ }
+
+ return plan;
+ }
+}
http://git-wip-us.apache.org/repos/asf/tajo/blob/32be38d4/tajo-core/src/main/java/org/apache/tajo/engine/planner/global/rewriter/GlobalPlanRewriteRule.java
----------------------------------------------------------------------
diff --git a/tajo-core/src/main/java/org/apache/tajo/engine/planner/global/rewriter/GlobalPlanRewriteRule.java b/tajo-core/src/main/java/org/apache/tajo/engine/planner/global/rewriter/GlobalPlanRewriteRule.java
new file mode 100644
index 0000000..4a37207
--- /dev/null
+++ b/tajo-core/src/main/java/org/apache/tajo/engine/planner/global/rewriter/GlobalPlanRewriteRule.java
@@ -0,0 +1,49 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.tajo.engine.planner.global.rewriter;
+
+import org.apache.tajo.engine.planner.global.MasterPlan;
+
+/**
+ * A rewrite rule for global plans
+ */
+public interface GlobalPlanRewriteRule {
+
+ /**
+ * Return rule name
+ * @return Rule name
+ */
+ public abstract String getName();
+
+ /**
+ * Check if this rule should be applied.
+ *
+ * @param plan Global Plan
+ * @return
+ */
+ public abstract boolean isEligible(MasterPlan plan);
+
+ /**
+ * Rewrite a global plan
+ *
+ * @param plan Global Plan
+ * @return
+ */
+ public abstract MasterPlan rewrite(MasterPlan plan);
+}
http://git-wip-us.apache.org/repos/asf/tajo/blob/32be38d4/tajo-core/src/main/java/org/apache/tajo/engine/planner/global/rewriter/GlobalPlanRewriteRuleProvider.java
----------------------------------------------------------------------
diff --git a/tajo-core/src/main/java/org/apache/tajo/engine/planner/global/rewriter/GlobalPlanRewriteRuleProvider.java b/tajo-core/src/main/java/org/apache/tajo/engine/planner/global/rewriter/GlobalPlanRewriteRuleProvider.java
new file mode 100644
index 0000000..638b5f3
--- /dev/null
+++ b/tajo-core/src/main/java/org/apache/tajo/engine/planner/global/rewriter/GlobalPlanRewriteRuleProvider.java
@@ -0,0 +1,33 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.tajo.engine.planner.global.rewriter;
+
+import org.apache.tajo.conf.TajoConf;
+
+import java.util.Collection;
+
+public abstract class GlobalPlanRewriteRuleProvider {
+ protected final TajoConf conf;
+
+ public GlobalPlanRewriteRuleProvider(TajoConf conf) {
+ this.conf = conf;
+ }
+
+ public abstract Collection<Class<? extends GlobalPlanRewriteRule>> getRules();
+}
http://git-wip-us.apache.org/repos/asf/tajo/blob/32be38d4/tajo-core/src/main/java/org/apache/tajo/engine/planner/global/rewriter/GlobalPlanTestRuleProvider.java
----------------------------------------------------------------------
diff --git a/tajo-core/src/main/java/org/apache/tajo/engine/planner/global/rewriter/GlobalPlanTestRuleProvider.java b/tajo-core/src/main/java/org/apache/tajo/engine/planner/global/rewriter/GlobalPlanTestRuleProvider.java
new file mode 100644
index 0000000..dc91577
--- /dev/null
+++ b/tajo-core/src/main/java/org/apache/tajo/engine/planner/global/rewriter/GlobalPlanTestRuleProvider.java
@@ -0,0 +1,44 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.tajo.engine.planner.global.rewriter;
+
+import com.google.common.collect.Lists;
+import org.apache.tajo.conf.TajoConf;
+import org.apache.tajo.engine.planner.global.rewriter.rules.GlobalPlanEqualityTester;
+
+import java.util.Collection;
+import java.util.List;
+
+/**
+ * It is used only for test.
+ */
+@SuppressWarnings("unused")
+public class GlobalPlanTestRuleProvider extends BaseGlobalPlanRewriteRuleProvider {
+
+ public GlobalPlanTestRuleProvider(TajoConf conf) {
+ super(conf);
+ }
+
+ @Override
+ public Collection<Class<? extends GlobalPlanRewriteRule>> getRules() {
+ List<Class<? extends GlobalPlanRewriteRule>> injectedRules = Lists.newArrayList(super.getRules());
+ injectedRules.add(GlobalPlanEqualityTester.class);
+ return injectedRules;
+ }
+}
http://git-wip-us.apache.org/repos/asf/tajo/blob/32be38d4/tajo-core/src/main/java/org/apache/tajo/engine/planner/global/rewriter/rules/GlobalPlanEqualityTester.java
----------------------------------------------------------------------
diff --git a/tajo-core/src/main/java/org/apache/tajo/engine/planner/global/rewriter/rules/GlobalPlanEqualityTester.java b/tajo-core/src/main/java/org/apache/tajo/engine/planner/global/rewriter/rules/GlobalPlanEqualityTester.java
new file mode 100644
index 0000000..e2fd47f
--- /dev/null
+++ b/tajo-core/src/main/java/org/apache/tajo/engine/planner/global/rewriter/rules/GlobalPlanEqualityTester.java
@@ -0,0 +1,63 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.tajo.engine.planner.global.rewriter.rules;
+
+import org.apache.tajo.engine.planner.global.ExecutionBlock;
+import org.apache.tajo.engine.planner.global.ExecutionBlockCursor;
+import org.apache.tajo.engine.planner.global.MasterPlan;
+import org.apache.tajo.engine.planner.global.rewriter.GlobalPlanRewriteRule;
+import org.apache.tajo.plan.logical.LogicalNode;
+import org.apache.tajo.plan.serder.LogicalNodeDeserializer;
+import org.apache.tajo.plan.serder.LogicalNodeSerializer;
+import org.apache.tajo.plan.serder.PlanProto;
+
+/**
+ * It verifies the equality between the input and output of LogicalNodeTree(De)Serializer in global planning.
+ */
+public class GlobalPlanEqualityTester implements GlobalPlanRewriteRule {
+
+ @Override
+ public String getName() {
+ return "GlobalPlanEqualityTester";
+ }
+
+ @Override
+ public boolean isEligible(MasterPlan plan) {
+ return true;
+ }
+
+ @Override
+ public MasterPlan rewrite(MasterPlan plan) {
+ try {
+ ExecutionBlockCursor cursor = new ExecutionBlockCursor(plan);
+ while (cursor.hasNext()) {
+ ExecutionBlock eb = cursor.nextBlock();
+ LogicalNode node = eb.getPlan();
+ if (node != null) {
+ PlanProto.LogicalNodeTree tree = LogicalNodeSerializer.serialize(node);
+ LogicalNode deserialize = LogicalNodeDeserializer.deserialize(plan.getContext(), tree);
+ assert node.deepEquals(deserialize);
+ }
+ }
+ return plan;
+ } catch (Throwable t) {
+ throw new RuntimeException(t);
+ }
+ }
+}
http://git-wip-us.apache.org/repos/asf/tajo/blob/32be38d4/tajo-core/src/main/java/org/apache/tajo/engine/planner/physical/DistinctGroupbyFirstAggregationExec.java
----------------------------------------------------------------------
diff --git a/tajo-core/src/main/java/org/apache/tajo/engine/planner/physical/DistinctGroupbyFirstAggregationExec.java b/tajo-core/src/main/java/org/apache/tajo/engine/planner/physical/DistinctGroupbyFirstAggregationExec.java
index bd24fa3..aca4879 100644
--- a/tajo-core/src/main/java/org/apache/tajo/engine/planner/physical/DistinctGroupbyFirstAggregationExec.java
+++ b/tajo-core/src/main/java/org/apache/tajo/engine/planner/physical/DistinctGroupbyFirstAggregationExec.java
@@ -130,7 +130,7 @@ public class DistinctGroupbyFirstAggregationExec extends PhysicalExec {
}
resultTupleLength = groupingKeyIndexes.length + 1; //1 is Sequence Datum which indicates sequence of DistinctNode.
- List<GroupbyNode> groupbyNodes = plan.getGroupByNodes();
+ List<GroupbyNode> groupbyNodes = plan.getSubPlans();
List<DistinctHashAggregator> distinctAggrList = new ArrayList<DistinctHashAggregator>();
int distinctSeq = 0;
http://git-wip-us.apache.org/repos/asf/tajo/blob/32be38d4/tajo-core/src/main/java/org/apache/tajo/engine/planner/physical/DistinctGroupbyHashAggregationExec.java
----------------------------------------------------------------------
diff --git a/tajo-core/src/main/java/org/apache/tajo/engine/planner/physical/DistinctGroupbyHashAggregationExec.java b/tajo-core/src/main/java/org/apache/tajo/engine/planner/physical/DistinctGroupbyHashAggregationExec.java
index eac5c70..37d61a9 100644
--- a/tajo-core/src/main/java/org/apache/tajo/engine/planner/physical/DistinctGroupbyHashAggregationExec.java
+++ b/tajo-core/src/main/java/org/apache/tajo/engine/planner/physical/DistinctGroupbyHashAggregationExec.java
@@ -76,7 +76,7 @@ public class DistinctGroupbyHashAggregationExec extends PhysicalExec {
distinctGroupingKeyIds[idx++] = intVal.intValue();
}
- List<GroupbyNode> groupbyNodes = plan.getGroupByNodes();
+ List<GroupbyNode> groupbyNodes = plan.getSubPlans();
groupbyNodeNum = groupbyNodes.size();
this.hashAggregators = new HashAggregator[groupbyNodeNum];
@@ -88,7 +88,7 @@ public class DistinctGroupbyHashAggregationExec extends PhysicalExec {
outputColumnNum = plan.getOutSchema().size();
int allGroupbyOutColNum = 0;
- for (GroupbyNode eachGroupby: plan.getGroupByNodes()) {
+ for (GroupbyNode eachGroupby: plan.getSubPlans()) {
allGroupbyOutColNum += eachGroupby.getOutSchema().size();
}
http://git-wip-us.apache.org/repos/asf/tajo/blob/32be38d4/tajo-core/src/main/java/org/apache/tajo/engine/planner/physical/DistinctGroupbySecondAggregationExec.java
----------------------------------------------------------------------
diff --git a/tajo-core/src/main/java/org/apache/tajo/engine/planner/physical/DistinctGroupbySecondAggregationExec.java b/tajo-core/src/main/java/org/apache/tajo/engine/planner/physical/DistinctGroupbySecondAggregationExec.java
index 383ccd3..cce9a24 100644
--- a/tajo-core/src/main/java/org/apache/tajo/engine/planner/physical/DistinctGroupbySecondAggregationExec.java
+++ b/tajo-core/src/main/java/org/apache/tajo/engine/planner/physical/DistinctGroupbySecondAggregationExec.java
@@ -100,7 +100,7 @@ public class DistinctGroupbySecondAggregationExec extends UnaryPhysicalExec {
numGroupingColumns = plan.getGroupingColumns().length;
- List<GroupbyNode> groupbyNodes = plan.getGroupByNodes();
+ List<GroupbyNode> groupbyNodes = plan.getSubPlans();
// Finding distinct group by column index.
Set<Integer> groupingKeyIndexSet = new HashSet<Integer>();
http://git-wip-us.apache.org/repos/asf/tajo/blob/32be38d4/tajo-core/src/main/java/org/apache/tajo/engine/planner/physical/DistinctGroupbySortAggregationExec.java
----------------------------------------------------------------------
diff --git a/tajo-core/src/main/java/org/apache/tajo/engine/planner/physical/DistinctGroupbySortAggregationExec.java b/tajo-core/src/main/java/org/apache/tajo/engine/planner/physical/DistinctGroupbySortAggregationExec.java
index 06b241c..6641633 100644
--- a/tajo-core/src/main/java/org/apache/tajo/engine/planner/physical/DistinctGroupbySortAggregationExec.java
+++ b/tajo-core/src/main/java/org/apache/tajo/engine/planner/physical/DistinctGroupbySortAggregationExec.java
@@ -47,13 +47,13 @@ public class DistinctGroupbySortAggregationExec extends PhysicalExec {
super(context, plan.getInSchema(), plan.getOutSchema());
this.plan = plan;
this.aggregateExecs = aggregateExecs;
- this.groupbyNodeNum = plan.getGroupByNodes().size();
+ this.groupbyNodeNum = plan.getSubPlans().size();
currentTuples = new Tuple[groupbyNodeNum];
outColumnNum = outSchema.size();
int allGroupbyOutColNum = 0;
- for (GroupbyNode eachGroupby: plan.getGroupByNodes()) {
+ for (GroupbyNode eachGroupby: plan.getSubPlans()) {
allGroupbyOutColNum += eachGroupby.getOutSchema().size();
}
http://git-wip-us.apache.org/repos/asf/tajo/blob/32be38d4/tajo-core/src/main/java/org/apache/tajo/engine/planner/physical/DistinctGroupbyThirdAggregationExec.java
----------------------------------------------------------------------
diff --git a/tajo-core/src/main/java/org/apache/tajo/engine/planner/physical/DistinctGroupbyThirdAggregationExec.java b/tajo-core/src/main/java/org/apache/tajo/engine/planner/physical/DistinctGroupbyThirdAggregationExec.java
index ff6fc4a..a76b91d 100644
--- a/tajo-core/src/main/java/org/apache/tajo/engine/planner/physical/DistinctGroupbyThirdAggregationExec.java
+++ b/tajo-core/src/main/java/org/apache/tajo/engine/planner/physical/DistinctGroupbyThirdAggregationExec.java
@@ -66,7 +66,7 @@ public class DistinctGroupbyThirdAggregationExec extends UnaryPhysicalExec {
numGroupingColumns = plan.getGroupingColumns().length;
resultTupleLength = numGroupingColumns;
- List<GroupbyNode> groupbyNodes = plan.getGroupByNodes();
+ List<GroupbyNode> groupbyNodes = plan.getSubPlans();
List<DistinctFinalAggregator> aggregatorList = new ArrayList<DistinctFinalAggregator>();
int inTupleIndex = 1 + numGroupingColumns;
http://git-wip-us.apache.org/repos/asf/tajo/blob/32be38d4/tajo-core/src/main/java/org/apache/tajo/engine/query/TaskRequest.java
----------------------------------------------------------------------
diff --git a/tajo-core/src/main/java/org/apache/tajo/engine/query/TaskRequest.java b/tajo-core/src/main/java/org/apache/tajo/engine/query/TaskRequest.java
index a3e586a..2fa272a 100644
--- a/tajo-core/src/main/java/org/apache/tajo/engine/query/TaskRequest.java
+++ b/tajo-core/src/main/java/org/apache/tajo/engine/query/TaskRequest.java
@@ -28,6 +28,7 @@ import org.apache.tajo.conf.TajoConf;
import org.apache.tajo.engine.planner.enforce.Enforcer;
import org.apache.tajo.engine.planner.global.DataChannel;
import org.apache.tajo.ipc.TajoWorkerProtocol;
+import org.apache.tajo.plan.serder.PlanProto;
import org.apache.tajo.worker.FetchImpl;
import java.util.List;
@@ -38,7 +39,7 @@ public interface TaskRequest extends ProtoObject<TajoWorkerProtocol.TaskRequestP
public List<CatalogProtos.FragmentProto> getFragments();
public String getOutputTableId();
public boolean isClusteredOutput();
- public String getSerializedData();
+ public PlanProto.LogicalNodeTree getPlan();
public boolean isInterQuery();
public void setInterQuery();
public void addFetch(String name, FetchImpl fetch);
http://git-wip-us.apache.org/repos/asf/tajo/blob/32be38d4/tajo-core/src/main/java/org/apache/tajo/engine/query/TaskRequestImpl.java
----------------------------------------------------------------------
diff --git a/tajo-core/src/main/java/org/apache/tajo/engine/query/TaskRequestImpl.java b/tajo-core/src/main/java/org/apache/tajo/engine/query/TaskRequestImpl.java
index cef5488..b4727dc 100644
--- a/tajo-core/src/main/java/org/apache/tajo/engine/query/TaskRequestImpl.java
+++ b/tajo-core/src/main/java/org/apache/tajo/engine/query/TaskRequestImpl.java
@@ -25,6 +25,7 @@ import org.apache.tajo.engine.planner.global.DataChannel;
import org.apache.tajo.ipc.TajoWorkerProtocol;
import org.apache.tajo.ipc.TajoWorkerProtocol.TaskRequestProto;
import org.apache.tajo.ipc.TajoWorkerProtocol.TaskRequestProtoOrBuilder;
+import org.apache.tajo.plan.serder.PlanProto;
import org.apache.tajo.worker.FetchImpl;
import java.util.ArrayList;
@@ -39,7 +40,7 @@ public class TaskRequestImpl implements TaskRequest {
private String outputTable;
private boolean isUpdated;
private boolean clusteredOutput;
- private String serializedData; // logical node
+ private PlanProto.LogicalNodeTree plan; // logical node
private Boolean interQuery;
private List<FetchImpl> fetches;
private Boolean shouldDie;
@@ -59,9 +60,10 @@ public class TaskRequestImpl implements TaskRequest {
public TaskRequestImpl(TaskAttemptId id, List<FragmentProto> fragments,
String outputTable, boolean clusteredOutput,
- String serializedData, QueryContext queryContext, DataChannel channel, Enforcer enforcer) {
+ PlanProto.LogicalNodeTree plan, QueryContext queryContext, DataChannel channel,
+ Enforcer enforcer) {
this();
- this.set(id, fragments, outputTable, clusteredOutput, serializedData, queryContext, channel, enforcer);
+ this.set(id, fragments, outputTable, clusteredOutput, plan, queryContext, channel, enforcer);
}
public TaskRequestImpl(TaskRequestProto proto) {
@@ -73,12 +75,12 @@ public class TaskRequestImpl implements TaskRequest {
public void set(TaskAttemptId id, List<FragmentProto> fragments,
String outputTable, boolean clusteredOutput,
- String serializedData, QueryContext queryContext, DataChannel dataChannel, Enforcer enforcer) {
+ PlanProto.LogicalNodeTree plan, QueryContext queryContext, DataChannel dataChannel, Enforcer enforcer) {
this.id = id;
this.fragments = fragments;
this.outputTable = outputTable;
this.clusteredOutput = clusteredOutput;
- this.serializedData = serializedData;
+ this.plan = plan;
this.isUpdated = true;
this.queryContext = queryContext;
this.queryContext = queryContext;
@@ -150,16 +152,16 @@ public class TaskRequestImpl implements TaskRequest {
}
@Override
- public String getSerializedData() {
+ public PlanProto.LogicalNodeTree getPlan() {
TaskRequestProtoOrBuilder p = viaProto ? proto : builder;
- if (this.serializedData != null) {
- return this.serializedData;
+ if (this.plan != null) {
+ return this.plan;
}
- if (!p.hasSerializedData()) {
+ if (!p.hasPlan()) {
return null;
}
- this.serializedData = p.getSerializedData();
- return this.serializedData;
+ this.plan = p.getPlan();
+ return this.plan;
}
public boolean isInterQuery() {
@@ -292,8 +294,8 @@ public class TaskRequestImpl implements TaskRequest {
if (this.isUpdated) {
builder.setClusteredOutput(this.clusteredOutput);
}
- if (this.serializedData != null) {
- builder.setSerializedData(this.serializedData);
+ if (this.plan != null) {
+ builder.setPlan(this.plan);
}
if (this.interQuery != null) {
builder.setInterQuery(this.interQuery);
http://git-wip-us.apache.org/repos/asf/tajo/blob/32be38d4/tajo-core/src/main/java/org/apache/tajo/engine/utils/test/ErrorInjectionRewriter.java
----------------------------------------------------------------------
diff --git a/tajo-core/src/main/java/org/apache/tajo/engine/utils/test/ErrorInjectionRewriter.java b/tajo-core/src/main/java/org/apache/tajo/engine/utils/test/ErrorInjectionRewriter.java
index 9787276..29dc845 100644
--- a/tajo-core/src/main/java/org/apache/tajo/engine/utils/test/ErrorInjectionRewriter.java
+++ b/tajo-core/src/main/java/org/apache/tajo/engine/utils/test/ErrorInjectionRewriter.java
@@ -18,23 +18,25 @@
package org.apache.tajo.engine.utils.test;
+import org.apache.tajo.OverridableConf;
import org.apache.tajo.plan.LogicalPlan;
import org.apache.tajo.plan.PlanningException;
-import org.apache.tajo.plan.rewrite.RewriteRule;
+import org.apache.tajo.plan.rewrite.LogicalPlanRewriteRule;
-public class ErrorInjectionRewriter implements RewriteRule {
+@SuppressWarnings("unused")
+public class ErrorInjectionRewriter implements LogicalPlanRewriteRule {
@Override
public String getName() {
return "ErrorInjectionRewriter";
}
@Override
- public boolean isEligible(LogicalPlan plan) {
+ public boolean isEligible(OverridableConf queryContext, LogicalPlan plan) {
return true;
}
@Override
- public LogicalPlan rewrite(LogicalPlan plan) throws PlanningException {
+ public LogicalPlan rewrite(OverridableConf queryContext, LogicalPlan plan) throws PlanningException {
throw new NullPointerException();
}
}
http://git-wip-us.apache.org/repos/asf/tajo/blob/32be38d4/tajo-core/src/main/java/org/apache/tajo/master/DefaultTaskScheduler.java
----------------------------------------------------------------------
diff --git a/tajo-core/src/main/java/org/apache/tajo/master/DefaultTaskScheduler.java b/tajo-core/src/main/java/org/apache/tajo/master/DefaultTaskScheduler.java
index dd6233c..1cd6587 100644
--- a/tajo-core/src/main/java/org/apache/tajo/master/DefaultTaskScheduler.java
+++ b/tajo-core/src/main/java/org/apache/tajo/master/DefaultTaskScheduler.java
@@ -35,13 +35,15 @@ import org.apache.tajo.engine.query.TaskRequest;
import org.apache.tajo.engine.query.TaskRequestImpl;
import org.apache.tajo.ipc.TajoWorkerProtocol;
import org.apache.tajo.master.cluster.WorkerConnectionInfo;
+import org.apache.tajo.master.container.TajoContainerId;
import org.apache.tajo.master.event.*;
import org.apache.tajo.master.event.TaskAttemptToSchedulerEvent.TaskAttemptScheduleContext;
import org.apache.tajo.master.event.TaskSchedulerEvent.EventType;
+import org.apache.tajo.master.querymaster.Stage;
import org.apache.tajo.master.querymaster.Task;
import org.apache.tajo.master.querymaster.TaskAttempt;
-import org.apache.tajo.master.querymaster.Stage;
-import org.apache.tajo.master.container.TajoContainerId;
+import org.apache.tajo.plan.serder.LogicalNodeSerializer;
+import org.apache.tajo.plan.serder.PlanProto;
import org.apache.tajo.storage.DataLocation;
import org.apache.tajo.storage.fragment.FileFragment;
import org.apache.tajo.storage.fragment.Fragment;
@@ -125,7 +127,7 @@ public class DefaultTaskScheduler extends AbstractTaskScheduler {
builder.setId(NULL_ATTEMPT_ID.getProto());
builder.setShouldDie(true);
builder.setOutputTable("");
- builder.setSerializedData("");
+ builder.setPlan(PlanProto.LogicalNodeTree.newBuilder());
builder.setClusteredOutput(false);
stopTaskRunnerReq = builder.build();
}
@@ -838,7 +840,7 @@ public class DefaultTaskScheduler extends AbstractTaskScheduler {
new ArrayList<FragmentProto>(task.getAllFragments()),
"",
false,
- task.getLogicalPlan().toJson(),
+ LogicalNodeSerializer.serialize(task.getLogicalPlan()),
context.getMasterContext().getQueryContext(),
stage.getDataChannel(), stage.getBlock().getEnforcer());
if (checkIfInterQuery(stage.getMasterPlan(), stage.getBlock())) {
@@ -894,7 +896,7 @@ public class DefaultTaskScheduler extends AbstractTaskScheduler {
Lists.newArrayList(task.getAllFragments()),
"",
false,
- task.getLogicalPlan().toJson(),
+ LogicalNodeSerializer.serialize(task.getLogicalPlan()),
context.getMasterContext().getQueryContext(),
stage.getDataChannel(),
stage.getBlock().getEnforcer());
http://git-wip-us.apache.org/repos/asf/tajo/blob/32be38d4/tajo-core/src/main/java/org/apache/tajo/master/GlobalEngine.java
----------------------------------------------------------------------
diff --git a/tajo-core/src/main/java/org/apache/tajo/master/GlobalEngine.java b/tajo-core/src/main/java/org/apache/tajo/master/GlobalEngine.java
index d7e7670..51964f0 100644
--- a/tajo-core/src/main/java/org/apache/tajo/master/GlobalEngine.java
+++ b/tajo-core/src/main/java/org/apache/tajo/master/GlobalEngine.java
@@ -94,6 +94,7 @@ public class GlobalEngine extends AbstractService {
annotatedPlanVerifier = new LogicalPlanVerifier(context.getConf(), context.getCatalog());
} catch (Throwable t) {
LOG.error(t.getMessage(), t);
+ throw new RuntimeException(t);
}
super.start();
}
http://git-wip-us.apache.org/repos/asf/tajo/blob/32be38d4/tajo-core/src/main/java/org/apache/tajo/master/exec/QueryExecutor.java
----------------------------------------------------------------------
diff --git a/tajo-core/src/main/java/org/apache/tajo/master/exec/QueryExecutor.java b/tajo-core/src/main/java/org/apache/tajo/master/exec/QueryExecutor.java
index 3585ae7..10701f9 100644
--- a/tajo-core/src/main/java/org/apache/tajo/master/exec/QueryExecutor.java
+++ b/tajo-core/src/main/java/org/apache/tajo/master/exec/QueryExecutor.java
@@ -144,10 +144,10 @@ public class QueryExecutor {
// others
} else {
- if (setSessionNode.isDefaultValue()) {
- session.removeVariable(varName);
- } else {
+ if (setSessionNode.hasValue()) {
session.setVariable(varName, setSessionNode.getValue());
+ } else {
+ session.removeVariable(varName);
}
}
http://git-wip-us.apache.org/repos/asf/tajo/blob/32be38d4/tajo-core/src/main/java/org/apache/tajo/master/querymaster/QueryMasterTask.java
----------------------------------------------------------------------
diff --git a/tajo-core/src/main/java/org/apache/tajo/master/querymaster/QueryMasterTask.java b/tajo-core/src/main/java/org/apache/tajo/master/querymaster/QueryMasterTask.java
index e3d3d79..720d60a 100644
--- a/tajo-core/src/main/java/org/apache/tajo/master/querymaster/QueryMasterTask.java
+++ b/tajo-core/src/main/java/org/apache/tajo/master/querymaster/QueryMasterTask.java
@@ -37,13 +37,11 @@ import org.apache.tajo.catalog.CatalogService;
import org.apache.tajo.catalog.TableDesc;
import org.apache.tajo.catalog.proto.CatalogProtos.StoreType;
import org.apache.tajo.conf.TajoConf;
-import org.apache.tajo.master.exec.prehook.DistributedQueryHookManager;
-import org.apache.tajo.master.exec.prehook.InsertIntoHook;
import org.apache.tajo.plan.LogicalOptimizer;
import org.apache.tajo.plan.LogicalPlan;
import org.apache.tajo.plan.LogicalPlanner;
import org.apache.tajo.plan.logical.LogicalRootNode;
-import org.apache.tajo.plan.rewrite.RewriteRule;
+import org.apache.tajo.plan.rewrite.LogicalPlanRewriteRule;
import org.apache.tajo.plan.util.PlannerUtil;
import org.apache.tajo.engine.planner.global.MasterPlan;
import org.apache.tajo.plan.logical.LogicalNode;
@@ -53,7 +51,6 @@ import org.apache.tajo.engine.query.QueryContext;
import org.apache.tajo.exception.UnimplementedException;
import org.apache.tajo.ipc.TajoMasterProtocol;
import org.apache.tajo.ipc.TajoWorkerProtocol;
-import org.apache.tajo.master.GlobalEngine;
import org.apache.tajo.master.TajoAsyncDispatcher;
import org.apache.tajo.master.TajoContainerProxy;
import org.apache.tajo.master.event.*;
@@ -380,10 +377,10 @@ public class QueryMasterTask extends CompositeService {
if (tableDesc == null) {
throw new VerifyException("Can't get table meta data from catalog: " + tableName);
}
- List<RewriteRule> storageSpecifiedRewriteRules = sm.getRewriteRules(
+ List<LogicalPlanRewriteRule> storageSpecifiedRewriteRules = sm.getRewriteRules(
getQueryTaskContext().getQueryContext(), tableDesc);
if (storageSpecifiedRewriteRules != null) {
- for (RewriteRule eachRule: storageSpecifiedRewriteRules) {
+ for (LogicalPlanRewriteRule eachRule: storageSpecifiedRewriteRules) {
optimizer.addRuleAfterToJoinOpt(eachRule);
}
}
http://git-wip-us.apache.org/repos/asf/tajo/blob/32be38d4/tajo-core/src/main/java/org/apache/tajo/worker/Task.java
----------------------------------------------------------------------
diff --git a/tajo-core/src/main/java/org/apache/tajo/worker/Task.java b/tajo-core/src/main/java/org/apache/tajo/worker/Task.java
index 70a3202..5f9c6ac 100644
--- a/tajo-core/src/main/java/org/apache/tajo/worker/Task.java
+++ b/tajo-core/src/main/java/org/apache/tajo/worker/Task.java
@@ -39,8 +39,8 @@ import org.apache.tajo.catalog.TableMeta;
import org.apache.tajo.catalog.proto.CatalogProtos;
import org.apache.tajo.catalog.statistics.TableStats;
import org.apache.tajo.conf.TajoConf;
-import org.apache.tajo.engine.json.CoreGsonHelper;
import org.apache.tajo.master.cluster.WorkerConnectionInfo;
+import org.apache.tajo.plan.serder.LogicalNodeDeserializer;
import org.apache.tajo.plan.util.PlannerUtil;
import org.apache.tajo.engine.planner.physical.PhysicalExec;
import org.apache.tajo.engine.query.QueryContext;
@@ -124,7 +124,7 @@ public class Task {
this.context.setEnforcer(request.getEnforcer());
this.inputStats = new TableStats();
- plan = CoreGsonHelper.fromJson(request.getSerializedData(), LogicalNode.class);
+ plan = LogicalNodeDeserializer.deserialize(queryContext, request.getPlan());
LogicalNode [] scanNode = PlannerUtil.findAllNodes(plan, NodeType.SCAN);
if (scanNode != null) {
for (LogicalNode node : scanNode) {
http://git-wip-us.apache.org/repos/asf/tajo/blob/32be38d4/tajo-core/src/main/proto/TajoWorkerProtocol.proto
----------------------------------------------------------------------
diff --git a/tajo-core/src/main/proto/TajoWorkerProtocol.proto b/tajo-core/src/main/proto/TajoWorkerProtocol.proto
index 5acbcd9..b8c9575 100644
--- a/tajo-core/src/main/proto/TajoWorkerProtocol.proto
+++ b/tajo-core/src/main/proto/TajoWorkerProtocol.proto
@@ -70,7 +70,7 @@ message TaskRequestProto {
repeated FragmentProto fragments = 2;
required string outputTable = 3;
required bool clusteredOutput = 4;
- required string serializedData = 5;
+ required LogicalNodeTree plan = 5;
optional bool interQuery = 6 [default = false];
repeated FetchProto fetches = 7;
optional bool shouldDie = 8;
@@ -261,7 +261,7 @@ message JoinEnforce {
MERGE_JOIN = 4;
}
- required int32 pid = 1;
+ required int32 nodeId = 1;
required JoinAlgorithm algorithm = 2;
}
@@ -271,7 +271,7 @@ message GroupbyEnforce {
SORT_AGGREGATION = 1;
}
- required int32 pid = 1;
+ required int32 nodeId = 1;
required GroupbyAlgorithm algorithm = 2;
repeated SortSpecProto sortSpecs = 3;
}
@@ -282,7 +282,7 @@ message SortEnforce {
MERGE_SORT = 1;
}
- required int32 pid = 1;
+ required int32 nodeId = 1;
required SortAlgorithm algorithm = 2;
}
@@ -296,7 +296,7 @@ message ColumnPartitionEnforcer {
SORT_PARTITION = 1;
}
- required int32 pid = 1;
+ required int32 nodeId = 1;
required ColumnPartitionAlgorithm algorithm = 2;
}
@@ -313,10 +313,10 @@ message DistinctGroupbyEnforcer {
}
message SortSpecArray {
- required int32 pid = 1;
+ required int32 nodeId = 1;
repeated SortSpecProto sortSpecs = 2;
}
- required int32 pid = 1;
+ required int32 nodeId = 1;
required DistinctAggregationAlgorithm algorithm = 2;
repeated SortSpecArray sortSpecArrays = 3;
required bool isMultipleAggregation = 4 [default = false];
http://git-wip-us.apache.org/repos/asf/tajo/blob/32be38d4/tajo-core/src/test/java/org/apache/tajo/TajoTestingCluster.java
----------------------------------------------------------------------
diff --git a/tajo-core/src/test/java/org/apache/tajo/TajoTestingCluster.java b/tajo-core/src/test/java/org/apache/tajo/TajoTestingCluster.java
index 7dc1089..5ff637c 100644
--- a/tajo-core/src/test/java/org/apache/tajo/TajoTestingCluster.java
+++ b/tajo-core/src/test/java/org/apache/tajo/TajoTestingCluster.java
@@ -42,12 +42,14 @@ import org.apache.tajo.client.TajoClientImpl;
import org.apache.tajo.client.TajoClientUtil;
import org.apache.tajo.conf.TajoConf;
import org.apache.tajo.conf.TajoConf.ConfVars;
+import org.apache.tajo.engine.planner.global.rewriter.GlobalPlanTestRuleProvider;
import org.apache.tajo.master.TajoMaster;
import org.apache.tajo.master.querymaster.Query;
import org.apache.tajo.master.querymaster.QueryMasterTask;
import org.apache.tajo.master.querymaster.Stage;
import org.apache.tajo.master.querymaster.StageState;
import org.apache.tajo.master.rm.TajoWorkerResourceManager;
+import org.apache.tajo.plan.rewrite.LogicalPlanTestRuleProvider;
import org.apache.tajo.util.CommonTestingUtil;
import org.apache.tajo.util.KeyValueSet;
import org.apache.tajo.util.NetUtils;
@@ -57,10 +59,7 @@ import java.io.*;
import java.net.InetSocketAddress;
import java.net.URL;
import java.sql.ResultSet;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.TimeZone;
-import java.util.UUID;
+import java.util.*;
public class TajoTestingCluster {
private static Log LOG = LogFactory.getLog(TajoTestingCluster.class);
@@ -119,10 +118,18 @@ public class TajoTestingCluster {
}
void initPropertiesAndConfigs() {
+
+ // Set time zone
TimeZone testDefaultTZ = TimeZone.getTimeZone(TajoConstants.DEFAULT_SYSTEM_TIMEZONE);
conf.setSystemTimezone(testDefaultTZ);
TimeZone.setDefault(testDefaultTZ);
+ // Injection of equality testing code of logical plan (de)serialization
+ conf.setClassVar(ConfVars.LOGICAL_PLAN_REWRITE_RULE_PROVIDER_CLASS, LogicalPlanTestRuleProvider.class);
+ conf.setClassVar(ConfVars.GLOBAL_PLAN_REWRITE_RULE_PROVIDER_CLASS, GlobalPlanTestRuleProvider.class);
+
+
+ // default resource manager
if (System.getProperty(ConfVars.RESOURCE_MANAGER_CLASS.varname) != null) {
String testResourceManager = System.getProperty(ConfVars.RESOURCE_MANAGER_CLASS.varname);
Preconditions.checkState(testResourceManager.equals(TajoWorkerResourceManager.class.getCanonicalName()));
http://git-wip-us.apache.org/repos/asf/tajo/blob/32be38d4/tajo-core/src/test/java/org/apache/tajo/engine/eval/ExprTestBase.java
----------------------------------------------------------------------
diff --git a/tajo-core/src/test/java/org/apache/tajo/engine/eval/ExprTestBase.java b/tajo-core/src/test/java/org/apache/tajo/engine/eval/ExprTestBase.java
index e286b92..4e4b710 100644
--- a/tajo-core/src/test/java/org/apache/tajo/engine/eval/ExprTestBase.java
+++ b/tajo-core/src/test/java/org/apache/tajo/engine/eval/ExprTestBase.java
@@ -38,8 +38,8 @@ import org.apache.tajo.engine.json.CoreGsonHelper;
import org.apache.tajo.engine.parser.SQLAnalyzer;
import org.apache.tajo.plan.*;
import org.apache.tajo.plan.expr.EvalNode;
-import org.apache.tajo.plan.serder.EvalTreeProtoDeserializer;
-import org.apache.tajo.plan.serder.EvalTreeProtoSerializer;
+import org.apache.tajo.plan.serder.EvalNodeDeserializer;
+import org.apache.tajo.plan.serder.EvalNodeSerializer;
import org.apache.tajo.engine.query.QueryContext;
import org.apache.tajo.catalog.SchemaUtil;
import org.apache.tajo.plan.serder.PlanProto;
@@ -62,7 +62,9 @@ import java.util.TimeZone;
import static org.apache.tajo.TajoConstants.DEFAULT_DATABASE_NAME;
import static org.apache.tajo.TajoConstants.DEFAULT_TABLESPACE_NAME;
-import static org.junit.Assert.*;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.fail;
public class ExprTestBase {
private static TajoTestingCluster util;
@@ -141,7 +143,7 @@ public class ExprTestBase {
assertFalse(state.getErrorMessages().get(0), true);
}
LogicalPlan plan = planner.createPlan(context, expr, true);
- optimizer.optimize(plan);
+ optimizer.optimize(context, plan);
annotatedPlanVerifier.verify(context, state, plan);
if (state.getErrorMessages().size() > 0) {
@@ -318,7 +320,7 @@ public class ExprTestBase {
}
public static void assertEvalTreeProtoSerDer(OverridableConf context, EvalNode evalNode) {
- PlanProto.EvalTree converted = EvalTreeProtoSerializer.serialize(evalNode);
- assertEquals(evalNode, EvalTreeProtoDeserializer.deserialize(context, converted));
+ PlanProto.EvalNodeTree converted = EvalNodeSerializer.serialize(evalNode);
+ assertEquals(evalNode, EvalNodeDeserializer.deserialize(context, converted));
}
}
http://git-wip-us.apache.org/repos/asf/tajo/blob/32be38d4/tajo-core/src/test/java/org/apache/tajo/engine/query/TestGroupByQuery.java
----------------------------------------------------------------------
diff --git a/tajo-core/src/test/java/org/apache/tajo/engine/query/TestGroupByQuery.java b/tajo-core/src/test/java/org/apache/tajo/engine/query/TestGroupByQuery.java
index bfd1700..794c14f 100644
--- a/tajo-core/src/test/java/org/apache/tajo/engine/query/TestGroupByQuery.java
+++ b/tajo-core/src/test/java/org/apache/tajo/engine/query/TestGroupByQuery.java
@@ -344,55 +344,84 @@ public class TestGroupByQuery extends QueryTestCaseBase {
}
@Test
- public final void testDistinctAggregationCasebyCase() throws Exception {
- ResultSet res;
-
+ public final void testDistinctAggregationCasebyCase1() throws Exception {
// one groupby, distinct, aggregation
- res = executeFile("testDistinctAggregation_case1.sql");
+ ResultSet res = executeFile("testDistinctAggregation_case1.sql");
assertResultSet(res, "testDistinctAggregation_case1.result");
res.close();
+ }
+ @Test
+ public final void testDistinctAggregationCasebyCase2() throws Exception {
// one groupby, two distinct, one aggregation
- res = executeFile("testDistinctAggregation_case2.sql");
+ ResultSet res = executeFile("testDistinctAggregation_case2.sql");
assertResultSet(res, "testDistinctAggregation_case2.result");
res.close();
+ }
+ @Test
+ public final void testDistinctAggregationCasebyCase3() throws Exception {
// one groupby, two distinct, two aggregation(no alias)
- res = executeFile("testDistinctAggregation_case3.sql");
+ ResultSet res = executeFile("testDistinctAggregation_case3.sql");
assertResultSet(res, "testDistinctAggregation_case3.result");
res.close();
+ }
+ @Test
+ public final void testDistinctAggregationCasebyCase4() throws Exception {
// two groupby, two distinct, two aggregation
- res = executeFile("testDistinctAggregation_case4.sql");
+ ResultSet res = executeFile("testDistinctAggregation_case4.sql");
assertResultSet(res, "testDistinctAggregation_case4.result");
res.close();
+ }
+ @Test
+ public final void testDistinctAggregationCasebyCase5() throws Exception {
// two groupby, two distinct, two aggregation with stage
- res = executeFile("testDistinctAggregation_case5.sql");
+ ResultSet res = executeFile("testDistinctAggregation_case5.sql");
assertResultSet(res, "testDistinctAggregation_case5.result");
res.close();
+ }
- res = executeFile("testDistinctAggregation_case6.sql");
+ @Test
+ public final void testDistinctAggregationCasebyCase6() throws Exception {
+ ResultSet res = executeFile("testDistinctAggregation_case6.sql");
assertResultSet(res, "testDistinctAggregation_case6.result");
res.close();
+ }
- res = executeFile("testDistinctAggregation_case7.sql");
+ @Test
+ public final void testDistinctAggregationCasebyCase7() throws Exception {
+ ResultSet res = executeFile("testDistinctAggregation_case7.sql");
assertResultSet(res, "testDistinctAggregation_case7.result");
res.close();
+ }
- res = executeFile("testDistinctAggregation_case8.sql");
+ @Test
+ public final void testDistinctAggregationCasebyCase8() throws Exception {
+ ResultSet res = executeFile("testDistinctAggregation_case8.sql");
assertResultSet(res, "testDistinctAggregation_case8.result");
res.close();
+ }
- res = executeFile("testDistinctAggregation_case9.sql");
+ @Test
+ public final void testDistinctAggregationCasebyCase9() throws Exception {
+ ResultSet res = executeFile("testDistinctAggregation_case9.sql");
assertResultSet(res, "testDistinctAggregation_case9.result");
res.close();
+ }
- res = executeFile("testDistinctAggregation_case10.sql");
+ @Test
+ public final void testDistinctAggregationCasebyCase10() throws Exception {
+ ResultSet res = executeFile("testDistinctAggregation_case10.sql");
assertResultSet(res, "testDistinctAggregation_case10.result");
res.close();
+ }
+
+ @Test
+ public final void testDistinctAggregationCasebyCase11() throws Exception {
+ ResultSet res;
- // case9
KeyValueSet tableOptions = new KeyValueSet();
tableOptions.set(StorageConstants.TEXT_DELIMITER, StorageConstants.DEFAULT_FIELD_DELIMITER);
tableOptions.set(StorageConstants.TEXT_NULL, "\\\\N");
@@ -417,7 +446,7 @@ public class TestGroupByQuery extends QueryTestCaseBase {
assertEquals(expected, resultSetToString(res));
- // multiple distinct with expression
+ // multiple distinct with expression
res = executeString(
"select count(distinct code) + count(distinct qty) from table10"
);
[6/8] tajo git commit: TAJO-1176: Implements queryable virtual tables
for catalog information
Posted by ji...@apache.org.
TAJO-1176: Implements queryable virtual tables for catalog information
Closes #273
Project: http://git-wip-us.apache.org/repos/asf/tajo/repo
Commit: http://git-wip-us.apache.org/repos/asf/tajo/commit/021a6f0b
Tree: http://git-wip-us.apache.org/repos/asf/tajo/tree/021a6f0b
Diff: http://git-wip-us.apache.org/repos/asf/tajo/diff/021a6f0b
Branch: refs/heads/index_support
Commit: 021a6f0b216ca9f67a5a889e72d2a8ce81c047f2
Parents: 32be38d
Author: Jihun Kang <ji...@apache.org>
Authored: Tue Dec 30 23:54:05 2014 +0900
Committer: Jihun Kang <ji...@apache.org>
Committed: Tue Dec 30 23:54:05 2014 +0900
----------------------------------------------------------------------
.../tajo/catalog/AbstractCatalogClient.java | 152 +++++
.../src/main/proto/CatalogProtocol.proto | 9 +-
.../org/apache/tajo/catalog/CatalogService.java | 37 +-
.../src/main/proto/CatalogProtos.proto | 81 ++-
.../tajo/catalog/store/HCatalogStore.java | 53 +-
.../org/apache/tajo/catalog/CatalogServer.java | 247 ++++++--
.../dictionary/AbstractTableDescriptor.java | 90 +++
.../catalog/dictionary/ColumnDescriptor.java | 47 ++
.../dictionary/ColumnsTableDescriptor.java | 48 ++
.../dictionary/DatabasesTableDescriptor.java | 47 ++
.../dictionary/IndexesTableDescriptor.java | 52 ++
.../InfoSchemaMetadataDictionary.java | 124 ++++
.../dictionary/PartitionsTableDescriptor.java | 48 ++
.../catalog/dictionary/TableDescriptor.java | 29 +
.../dictionary/TableOptionsTableDescriptor.java | 46 ++
.../dictionary/TableStatsTableDescriptor.java | 46 ++
.../dictionary/TablesTableDescriptor.java | 49 ++
.../dictionary/TablespacesTableDescriptor.java | 48 ++
.../tajo/catalog/store/AbstractDBStore.java | 295 +++++++++
.../apache/tajo/catalog/store/CatalogStore.java | 24 +
.../org/apache/tajo/catalog/store/MemStore.java | 189 ++++++
.../org/apache/tajo/catalog/TestCatalog.java | 15 +-
.../NonForwardQueryResultFileScanner.java | 164 +++++
.../master/NonForwardQueryResultScanner.java | 148 +----
.../NonForwardQueryResultSystemScanner.java | 616 +++++++++++++++++++
.../tajo/master/TajoMasterClientService.java | 2 +-
.../apache/tajo/master/exec/QueryExecutor.java | 27 +-
.../TestNonForwardQueryResultSystemScanner.java | 296 +++++++++
.../org/apache/tajo/plan/LogicalPlanner.java | 3 +-
.../org/apache/tajo/plan/util/PlannerUtil.java | 21 +
30 files changed, 2857 insertions(+), 196 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/tajo/blob/021a6f0b/tajo-catalog/tajo-catalog-client/src/main/java/org/apache/tajo/catalog/AbstractCatalogClient.java
----------------------------------------------------------------------
diff --git a/tajo-catalog/tajo-catalog-client/src/main/java/org/apache/tajo/catalog/AbstractCatalogClient.java b/tajo-catalog/tajo-catalog-client/src/main/java/org/apache/tajo/catalog/AbstractCatalogClient.java
index 6b50115..8ef1c9a 100644
--- a/tajo-catalog/tajo-catalog-client/src/main/java/org/apache/tajo/catalog/AbstractCatalogClient.java
+++ b/tajo-catalog/tajo-catalog-client/src/main/java/org/apache/tajo/catalog/AbstractCatalogClient.java
@@ -19,12 +19,20 @@
package org.apache.tajo.catalog;
import com.google.protobuf.ServiceException;
+
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.tajo.annotation.Nullable;
import org.apache.tajo.catalog.CatalogProtocol.CatalogProtocolService;
import org.apache.tajo.catalog.exception.NoSuchFunctionException;
import org.apache.tajo.catalog.partition.PartitionMethodDesc;
+import org.apache.tajo.catalog.proto.CatalogProtos;
+import org.apache.tajo.catalog.proto.CatalogProtos.ColumnProto;
+import org.apache.tajo.catalog.proto.CatalogProtos.DatabaseProto;
+import org.apache.tajo.catalog.proto.CatalogProtos.TableDescriptorProto;
+import org.apache.tajo.catalog.proto.CatalogProtos.TableOptionProto;
+import org.apache.tajo.catalog.proto.CatalogProtos.TableStatsProto;
+import org.apache.tajo.catalog.proto.CatalogProtos.TablespaceProto;
import org.apache.tajo.catalog.proto.CatalogProtos.*;
import org.apache.tajo.common.TajoDataTypes.DataType;
import org.apache.tajo.conf.TajoConf;
@@ -139,6 +147,24 @@ public abstract class AbstractCatalogClient implements CatalogService {
return null;
}
}
+
+ @Override
+ public List<TablespaceProto> getAllTablespaces() {
+ try {
+ return new ServerCallable<List<TablespaceProto>>(pool, getCatalogServerAddr(), CatalogProtocol.class, false) {
+
+ @Override
+ public List<TablespaceProto> call(NettyClientBase client) throws Exception {
+ CatalogProtocolService.BlockingInterface stub = getStub(client);
+ CatalogProtos.GetTablespacesProto response = stub.getAllTablespaces(null, ProtoUtil.NULL_PROTO);
+ return response.getTablespaceList();
+ }
+ }.withRetries();
+ } catch (ServiceException e) {
+ LOG.error(e.getMessage(), e);
+ return null;
+ }
+ }
@Override
public TablespaceProto getTablespace(final String tablespaceName) {
@@ -236,6 +262,24 @@ public abstract class AbstractCatalogClient implements CatalogService {
return null;
}
}
+
+ @Override
+ public List<DatabaseProto> getAllDatabases() {
+ try {
+ return new ServerCallable<List<DatabaseProto>>(pool, getCatalogServerAddr(), CatalogProtocol.class, false) {
+
+ @Override
+ public List<DatabaseProto> call(NettyClientBase client) throws Exception {
+ CatalogProtocolService.BlockingInterface stub = getStub(client);
+ GetDatabasesProto response = stub.getAllDatabases(null, ProtoUtil.NULL_PROTO);
+ return response.getDatabaseList();
+ }
+ }.withRetries();
+ } catch (ServiceException e) {
+ LOG.error(e.getMessage(), e);
+ return null;
+ }
+ }
@Override
public final TableDesc getTableDesc(final String databaseName, final String tableName) {
@@ -261,6 +305,78 @@ public abstract class AbstractCatalogClient implements CatalogService {
String [] splitted = CatalogUtil.splitFQTableName(qualifiedName);
return getTableDesc(splitted[0], splitted[1]);
}
+
+ @Override
+ public List<TableDescriptorProto> getAllTables() {
+ try {
+ return new ServerCallable<List<TableDescriptorProto>>(pool, getCatalogServerAddr(), CatalogProtocol.class, false) {
+
+ @Override
+ public List<TableDescriptorProto> call(NettyClientBase client) throws Exception {
+ CatalogProtocolService.BlockingInterface stub = getStub(client);
+ GetTablesProto response = stub.getAllTables(null, ProtoUtil.NULL_PROTO);
+ return response.getTableList();
+ }
+ }.withRetries();
+ } catch (ServiceException e) {
+ LOG.error(e.getMessage(), e);
+ return null;
+ }
+ }
+
+ @Override
+ public List<TableOptionProto> getAllTableOptions() {
+ try {
+ return new ServerCallable<List<TableOptionProto>>(pool, getCatalogServerAddr(), CatalogProtocol.class, false) {
+
+ @Override
+ public List<TableOptionProto> call(NettyClientBase client) throws Exception {
+ CatalogProtocolService.BlockingInterface stub = getStub(client);
+ GetTableOptionsProto response = stub.getAllTableOptions(null, ProtoUtil.NULL_PROTO);
+ return response.getTableOptionList();
+ }
+ }.withRetries();
+ } catch (ServiceException e) {
+ LOG.error(e.getMessage(), e);
+ return null;
+ }
+ }
+
+ @Override
+ public List<TableStatsProto> getAllTableStats() {
+ try {
+ return new ServerCallable<List<TableStatsProto>>(pool, getCatalogServerAddr(), CatalogProtocol.class, false) {
+
+ @Override
+ public List<TableStatsProto> call(NettyClientBase client) throws Exception {
+ CatalogProtocolService.BlockingInterface stub = getStub(client);
+ GetTableStatsProto response = stub.getAllTableStats(null, ProtoUtil.NULL_PROTO);
+ return response.getStatList();
+ }
+ }.withRetries();
+ } catch (ServiceException e) {
+ LOG.error(e.getMessage(), e);
+ return null;
+ }
+ }
+
+ @Override
+ public List<ColumnProto> getAllColumns() {
+ try {
+ return new ServerCallable<List<ColumnProto>>(pool, getCatalogServerAddr(), CatalogProtocol.class, false) {
+
+ @Override
+ public List<ColumnProto> call(NettyClientBase client) throws Exception {
+ CatalogProtocolService.BlockingInterface stub = getStub(client);
+ GetColumnsProto response = stub.getAllColumns(null, ProtoUtil.NULL_PROTO);
+ return response.getColumnList();
+ }
+ }.withRetries();
+ } catch (ServiceException e) {
+ LOG.error(e.getMessage(), e);
+ return null;
+ }
+ }
@Override
public final PartitionMethodDesc getPartitionMethod(final String databaseName, final String tableName) {
@@ -301,6 +417,24 @@ public abstract class AbstractCatalogClient implements CatalogService {
return false;
}
}
+
+ @Override
+ public List<TablePartitionProto> getAllPartitions() {
+ try {
+ return new ServerCallable<List<TablePartitionProto>>(pool, getCatalogServerAddr(), CatalogProtocol.class, false) {
+
+ @Override
+ public List<TablePartitionProto> call(NettyClientBase client) throws Exception {
+ CatalogProtocolService.BlockingInterface stub = getStub(client);
+ GetTablePartitionsProto response = stub.getAllPartitions(null, ProtoUtil.NULL_PROTO);
+ return response.getPartList();
+ }
+ }.withRetries();
+ } catch (ServiceException e) {
+ LOG.error(e.getMessage(), e);
+ return null;
+ }
+ }
@Override
public final Collection<String> getAllTableNames(final String databaseName) {
@@ -529,6 +663,24 @@ public abstract class AbstractCatalogClient implements CatalogService {
return false;
}
}
+
+ @Override
+ public List<IndexProto> getAllIndexes() {
+ try {
+ return new ServerCallable<List<IndexProto>>(pool, getCatalogServerAddr(), CatalogProtocol.class, false) {
+
+ @Override
+ public List<IndexProto> call(NettyClientBase client) throws Exception {
+ CatalogProtocolService.BlockingInterface stub = getStub(client);
+ GetIndexesProto response = stub.getAllIndexes(null, ProtoUtil.NULL_PROTO);
+ return response.getIndexList();
+ }
+ }.withRetries();
+ } catch (ServiceException e) {
+ LOG.error(e.getMessage(), e);
+ return null;
+ }
+ }
@Override
public final boolean createFunction(final FunctionDesc funcDesc) {
http://git-wip-us.apache.org/repos/asf/tajo/blob/021a6f0b/tajo-catalog/tajo-catalog-client/src/main/proto/CatalogProtocol.proto
----------------------------------------------------------------------
diff --git a/tajo-catalog/tajo-catalog-client/src/main/proto/CatalogProtocol.proto b/tajo-catalog/tajo-catalog-client/src/main/proto/CatalogProtocol.proto
index adf0740..cae5d88 100644
--- a/tajo-catalog/tajo-catalog-client/src/main/proto/CatalogProtocol.proto
+++ b/tajo-catalog/tajo-catalog-client/src/main/proto/CatalogProtocol.proto
@@ -29,23 +29,28 @@ service CatalogProtocolService {
rpc createTablespace(CreateTablespaceRequest) returns (BoolProto);
rpc dropTablespace(StringProto) returns (BoolProto);
rpc existTablespace(StringProto) returns (BoolProto);
+ rpc getAllTablespaces(NullProto) returns (GetTablespacesProto);
rpc getAllTablespaceNames(NullProto) returns (StringListProto);
rpc getTablespace(StringProto) returns (TablespaceProto);
rpc alterTablespace(AlterTablespaceProto) returns (BoolProto);
rpc alterTable(AlterTableDescProto) returns (BoolProto);
rpc updateTableStats(UpdateTableStatsProto) returns (BoolProto);
-
rpc createDatabase(CreateDatabaseRequest) returns (BoolProto);
rpc dropDatabase(StringProto) returns (BoolProto);
rpc existDatabase(StringProto) returns (BoolProto);
rpc getAllDatabaseNames(NullProto) returns (StringListProto);
+ rpc getAllDatabases(NullProto) returns (GetDatabasesProto);
rpc createTable(TableDescProto) returns (BoolProto);
rpc dropTable(TableIdentifierProto) returns (BoolProto);
rpc existsTable(TableIdentifierProto) returns (BoolProto);
rpc getTableDesc(TableIdentifierProto) returns (TableDescProto);
rpc getAllTableNames(StringProto) returns (StringListProto);
+ rpc getAllTables(NullProto) returns (GetTablesProto);
+ rpc getAllTableOptions(NullProto) returns (GetTableOptionsProto);
+ rpc getAllTableStats(NullProto) returns (GetTableStatsProto);
+ rpc getAllColumns(NullProto) returns (GetColumnsProto);
rpc getPartitionMethodByTableName(TableIdentifierProto) returns (PartitionMethodProto);
rpc existPartitionMethod(TableIdentifierProto) returns (BoolProto);
@@ -56,6 +61,7 @@ service CatalogProtocolService {
rpc getPartitionByPartitionName(StringProto) returns (PartitionDescProto);
rpc getPartitionsByTableName(StringProto) returns (PartitionsProto);
rpc delAllPartitions(StringProto) returns (PartitionsProto);
+ rpc getAllPartitions(NullProto) returns (GetTablePartitionsProto);
rpc createIndex(IndexDescProto) returns (BoolProto);
rpc dropIndex(IndexNameProto) returns (BoolProto);
@@ -63,6 +69,7 @@ service CatalogProtocolService {
rpc existIndexByColumn(GetIndexByColumnRequest) returns (BoolProto);
rpc getIndexByName(IndexNameProto) returns (IndexDescProto);
rpc getIndexByColumn(GetIndexByColumnRequest) returns (IndexDescProto);
+ rpc getAllIndexes(NullProto) returns (GetIndexesProto);
rpc createFunction(FunctionDescProto) returns (BoolProto);
rpc dropFunction(UnregisterFunctionRequest) returns (BoolProto);
http://git-wip-us.apache.org/repos/asf/tajo/blob/021a6f0b/tajo-catalog/tajo-catalog-common/src/main/java/org/apache/tajo/catalog/CatalogService.java
----------------------------------------------------------------------
diff --git a/tajo-catalog/tajo-catalog-common/src/main/java/org/apache/tajo/catalog/CatalogService.java b/tajo-catalog/tajo-catalog-common/src/main/java/org/apache/tajo/catalog/CatalogService.java
index b41b636..eb11272 100644
--- a/tajo-catalog/tajo-catalog-common/src/main/java/org/apache/tajo/catalog/CatalogService.java
+++ b/tajo-catalog/tajo-catalog-common/src/main/java/org/apache/tajo/catalog/CatalogService.java
@@ -19,10 +19,17 @@
package org.apache.tajo.catalog;
import org.apache.tajo.catalog.partition.PartitionMethodDesc;
-import org.apache.tajo.catalog.proto.CatalogProtos;
+import org.apache.tajo.catalog.proto.CatalogProtos.ColumnProto;
+import org.apache.tajo.catalog.proto.CatalogProtos.DatabaseProto;
+import org.apache.tajo.catalog.proto.CatalogProtos.IndexProto;
+import org.apache.tajo.catalog.proto.CatalogProtos.TableDescriptorProto;
+import org.apache.tajo.catalog.proto.CatalogProtos.TableOptionProto;
+import org.apache.tajo.catalog.proto.CatalogProtos.TablePartitionProto;
+import org.apache.tajo.catalog.proto.CatalogProtos.TableStatsProto;
import org.apache.tajo.common.TajoDataTypes.DataType;
import java.util.Collection;
+import java.util.List;
import static org.apache.tajo.catalog.proto.CatalogProtos.AlterTablespaceProto;
import static org.apache.tajo.catalog.proto.CatalogProtos.FunctionType;
@@ -58,6 +65,11 @@ public interface CatalogService {
* @return All tablespace names
*/
Collection<String> getAllTablespaceNames();
+
+ /**
+   * @return Descriptions of all tablespaces registered in the catalog.
+ */
+ List<TablespaceProto> getAllTablespaces();
/**
*
@@ -99,6 +111,11 @@ public interface CatalogService {
* @return All database names
*/
Collection<String> getAllDatabaseNames();
+
+ /**
+   * @return Descriptions of all databases registered in the catalog.
+ */
+ List<DatabaseProto> getAllDatabases();
/**
* Get a table description by name
@@ -122,6 +139,20 @@ public interface CatalogService {
* @return All table names which belong to a given database.
*/
Collection<String> getAllTableNames(String databaseName);
+
+ /**
+   * @return Descriptors of all tables across every database in the catalog.
+ */
+ List<TableDescriptorProto> getAllTables();
+
+ List<TableOptionProto> getAllTableOptions();
+
+ List<TableStatsProto> getAllTableStats();
+
+ /**
+   * @return All column definitions across every table in the catalog.
+ */
+ List<ColumnProto> getAllColumns();
/**
*
@@ -152,6 +183,8 @@ public interface CatalogService {
PartitionMethodDesc getPartitionMethod(String databaseName, String tableName);
boolean existPartitionMethod(String databaseName, String tableName);
+
+ List<TablePartitionProto> getAllPartitions();
boolean createIndex(IndexDesc index);
@@ -164,6 +197,8 @@ public interface CatalogService {
IndexDesc getIndexByColumn(String databaseName, String tableName, String columnName);
boolean dropIndex(String databaseName, String indexName);
+
+ List<IndexProto> getAllIndexes();
boolean createFunction(FunctionDesc funcDesc);
http://git-wip-us.apache.org/repos/asf/tajo/blob/021a6f0b/tajo-catalog/tajo-catalog-common/src/main/proto/CatalogProtos.proto
----------------------------------------------------------------------
diff --git a/tajo-catalog/tajo-catalog-common/src/main/proto/CatalogProtos.proto b/tajo-catalog/tajo-catalog-common/src/main/proto/CatalogProtos.proto
index 946b563..a204685 100644
--- a/tajo-catalog/tajo-catalog-common/src/main/proto/CatalogProtos.proto
+++ b/tajo-catalog/tajo-catalog-common/src/main/proto/CatalogProtos.proto
@@ -38,6 +38,7 @@ enum StoreType {
TEXTFILE = 10;
JSON = 11;
HBASE = 12;
+ SYSTEM = 13;
}
enum OrderType {
@@ -61,6 +62,7 @@ enum AlterTableType {
message ColumnProto {
required string name = 1;
+ optional int32 tid = 2;
required DataType dataType = 3;
}
@@ -101,7 +103,9 @@ message TableDescProto {
message TableIdentifierProto {
required string database_name = 1;
- required string table_name = 3;
+ required string table_name = 2;
+ optional int32 dbId = 3;
+ optional int32 tid = 4;
}
message NamespaceProto {
@@ -130,6 +134,63 @@ message GetAllTableNamesResponse {
repeated string tableName = 1;
}
+message GetTablespacesProto {
+ repeated TablespaceProto tablespace = 1;
+}
+
+message GetDatabasesProto {
+ repeated DatabaseProto database = 1;
+}
+
+message GetTablesProto {
+ repeated TableDescriptorProto table = 1;
+}
+
+message GetColumnsProto {
+ repeated ColumnProto column = 1;
+}
+
+message GetIndexesProto {
+ repeated IndexProto index = 1;
+}
+
+message GetTableOptionsProto {
+ repeated TableOptionProto tableOption = 1;
+}
+
+message GetTableStatsProto {
+ repeated TableStatsProto stat = 1;
+}
+
+message GetTablePartitionsProto {
+ repeated TablePartitionProto part = 1;
+}
+
+message IndexProto {
+ required int32 dbId = 1;
+ required int32 tId = 2;
+ required string indexName = 3;
+ required string columnName = 4;
+ required string dataType = 5;
+ required string indexType = 6;
+ optional bool isUnique = 7 [default = false];
+ optional bool isClustered = 8 [default = false];
+ optional bool isAscending = 9 [default = false];
+}
+
+message TableOptionProto {
+ required int32 tid = 1;
+ required KeyValueProto keyval = 2;
+}
+
+message TablePartitionProto {
+ required int32 pid = 1;
+ required int32 tid = 2;
+ optional string partitionName = 3;
+ required int32 ordinalPosition = 4;
+ optional string path = 5;
+}
+
message GetIndexByColumnRequest {
required TableIdentifierProto tableIdentifier = 1;
required string columnName = 2;
@@ -169,6 +230,7 @@ message TableStatsProto {
optional int64 avgRows = 6;
optional int64 readBytes = 7;
repeated ColumnStatsProto colStat = 8;
+ optional int32 tid = 9;
}
message ColumnStatsProto {
@@ -240,6 +302,23 @@ message PartitionDescProto {
message TablespaceProto {
required string spaceName = 1;
required string uri = 2;
+ optional string handler = 3;
+ optional int32 id = 4;
+}
+
+message DatabaseProto {
+ required int32 spaceId = 1;
+ required int32 id = 2;
+ required string name = 3;
+}
+
+message TableDescriptorProto {
+ required int32 dbId = 1;
+ required int32 tid = 2;
+ required string name = 3;
+ optional string tableType = 4;
+ required string path = 5;
+ required string storeType = 6;
}
message AlterTablespaceProto {
http://git-wip-us.apache.org/repos/asf/tajo/blob/021a6f0b/tajo-catalog/tajo-catalog-drivers/tajo-hcatalog/src/main/java/org/apache/tajo/catalog/store/HCatalogStore.java
----------------------------------------------------------------------
diff --git a/tajo-catalog/tajo-catalog-drivers/tajo-hcatalog/src/main/java/org/apache/tajo/catalog/store/HCatalogStore.java b/tajo-catalog/tajo-catalog-drivers/tajo-hcatalog/src/main/java/org/apache/tajo/catalog/store/HCatalogStore.java
index ad0aee3..89c0fdd 100644
--- a/tajo-catalog/tajo-catalog-drivers/tajo-hcatalog/src/main/java/org/apache/tajo/catalog/store/HCatalogStore.java
+++ b/tajo-catalog/tajo-catalog-drivers/tajo-hcatalog/src/main/java/org/apache/tajo/catalog/store/HCatalogStore.java
@@ -19,6 +19,7 @@
package org.apache.tajo.catalog.store;
import com.google.common.collect.Lists;
+
import org.apache.commons.lang.StringEscapeUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
@@ -37,10 +38,17 @@ import org.apache.hcatalog.data.schema.HCatFieldSchema;
import org.apache.hcatalog.data.schema.HCatSchema;
import org.apache.tajo.TajoConstants;
import org.apache.tajo.catalog.*;
-import org.apache.tajo.catalog.Schema;
import org.apache.tajo.catalog.exception.*;
import org.apache.tajo.catalog.partition.PartitionMethodDesc;
import org.apache.tajo.catalog.proto.CatalogProtos;
+import org.apache.tajo.catalog.proto.CatalogProtos.ColumnProto;
+import org.apache.tajo.catalog.proto.CatalogProtos.DatabaseProto;
+import org.apache.tajo.catalog.proto.CatalogProtos.IndexProto;
+import org.apache.tajo.catalog.proto.CatalogProtos.TableDescriptorProto;
+import org.apache.tajo.catalog.proto.CatalogProtos.TableOptionProto;
+import org.apache.tajo.catalog.proto.CatalogProtos.TablePartitionProto;
+import org.apache.tajo.catalog.proto.CatalogProtos.TableStatsProto;
+import org.apache.tajo.catalog.proto.CatalogProtos.TablespaceProto;
import org.apache.tajo.catalog.statistics.TableStats;
import org.apache.tajo.common.TajoDataTypes;
import org.apache.tajo.common.exception.NotImplementedException;
@@ -54,7 +62,6 @@ import java.io.IOException;
import java.util.*;
import static org.apache.tajo.catalog.proto.CatalogProtos.PartitionType;
-import static org.apache.tajo.catalog.proto.CatalogProtos.TablespaceProto;
public class HCatalogStore extends CatalogConstants implements CatalogStore {
protected final Log LOG = LogFactory.getLog(getClass());
@@ -236,7 +243,7 @@ public class HCatalogStore extends CatalogConstants implements CatalogStore {
List<FieldSchema> partitionKeys = table.getPartitionKeys();
if (null != partitionKeys) {
- Schema expressionSchema = new Schema();
+ org.apache.tajo.catalog.Schema expressionSchema = new org.apache.tajo.catalog.Schema();
StringBuilder sb = new StringBuilder();
if (partitionKeys.size() > 0) {
for (int i = 0; i < partitionKeys.size(); i++) {
@@ -841,4 +848,44 @@ public class HCatalogStore extends CatalogConstants implements CatalogStore {
return exist;
}
+
+ @Override
+ public List<ColumnProto> getAllColumns() throws CatalogException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public List<DatabaseProto> getAllDatabases() throws CatalogException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public List<IndexProto> getAllIndexes() throws CatalogException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public List<TablePartitionProto> getAllPartitions() throws CatalogException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public List<TableOptionProto> getAllTableOptions() throws CatalogException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public List<TableStatsProto> getAllTableStats() throws CatalogException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public List<TableDescriptorProto> getAllTables() throws CatalogException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public List<TablespaceProto> getTablespaces() throws CatalogException {
+ throw new UnsupportedOperationException();
+ }
}
http://git-wip-us.apache.org/repos/asf/tajo/blob/021a6f0b/tajo-catalog/tajo-catalog-server/src/main/java/org/apache/tajo/catalog/CatalogServer.java
----------------------------------------------------------------------
diff --git a/tajo-catalog/tajo-catalog-server/src/main/java/org/apache/tajo/catalog/CatalogServer.java b/tajo-catalog/tajo-catalog-server/src/main/java/org/apache/tajo/catalog/CatalogServer.java
index 57086e2..30b1767 100644
--- a/tajo-catalog/tajo-catalog-server/src/main/java/org/apache/tajo/catalog/CatalogServer.java
+++ b/tajo-catalog/tajo-catalog-server/src/main/java/org/apache/tajo/catalog/CatalogServer.java
@@ -23,6 +23,7 @@ import com.google.common.base.Preconditions;
import com.google.common.collect.Lists;
import com.google.protobuf.RpcController;
import com.google.protobuf.ServiceException;
+
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
@@ -30,6 +31,7 @@ import org.apache.hadoop.service.AbstractService;
import org.apache.tajo.TajoConstants;
import org.apache.tajo.annotation.ThreadSafe;
import org.apache.tajo.catalog.CatalogProtocol.CatalogProtocolService;
+import org.apache.tajo.catalog.dictionary.InfoSchemaMetadataDictionary;
import org.apache.tajo.catalog.exception.*;
import org.apache.tajo.catalog.proto.CatalogProtos.*;
import org.apache.tajo.catalog.store.CatalogStore;
@@ -79,6 +81,7 @@ public class CatalogServer extends AbstractService {
private CatalogStore store;
private Map<String, List<FunctionDescProto>> functions = new ConcurrentHashMap<String,
List<FunctionDescProto>>();
+ private final InfoSchemaMetadataDictionary metaDictionary = new InfoSchemaMetadataDictionary();
// RPC variables
private BlockingRpcServer rpcServer;
@@ -298,6 +301,18 @@ public class CatalogServer extends AbstractService {
rlock.unlock();
}
}
+
+ @Override
+ public GetTablespacesProto getAllTablespaces(RpcController controller, NullProto request) throws ServiceException {
+ rlock.lock();
+ try {
+ return GetTablespacesProto.newBuilder().addAllTablespace(store.getTablespaces()).build();
+ } catch (Exception e) {
+ throw new ServiceException(e);
+ } finally {
+ rlock.unlock();
+ }
+ }
@Override
public TablespaceProto getTablespace(RpcController controller, StringProto request) throws ServiceException {
@@ -349,6 +364,10 @@ public class CatalogServer extends AbstractService {
String databaseName = request.getDatabaseName();
String tablespaceName = request.getTablespaceName();
+ if (metaDictionary.isSystemDatabase(databaseName)) {
+ throw new ServiceException(databaseName + " is a system database name.");
+ }
+
wlock.lock();
try {
if (store.existDatabase(databaseName)) {
@@ -389,9 +408,14 @@ public class CatalogServer extends AbstractService {
@Override
public BoolProto alterTable(RpcController controller, AlterTableDescProto proto) throws ServiceException {
+ String [] split = CatalogUtil.splitTableName(proto.getTableName());
+
+ if (metaDictionary.isSystemDatabase(split[0])) {
+ throw new ServiceException(split[0] + " is a system database.");
+ }
+
wlock.lock();
try {
- String [] split = CatalogUtil.splitTableName(proto.getTableName());
if (!store.existTable(split[0], split[1])) {
throw new NoSuchTableException(proto.getTableName());
}
@@ -410,6 +434,10 @@ public class CatalogServer extends AbstractService {
@Override
public BoolProto dropDatabase(RpcController controller, StringProto request) throws ServiceException {
String databaseName = request.getValue();
+
+ if (metaDictionary.isSystemDatabase(databaseName)) {
+ throw new ServiceException(databaseName + " is a system database.");
+ }
wlock.lock();
try {
@@ -432,13 +460,33 @@ public class CatalogServer extends AbstractService {
public BoolProto existDatabase(RpcController controller, StringProto request) throws ServiceException {
String databaseName = request.getValue();
+ if (!metaDictionary.isSystemDatabase(databaseName)) {
+ rlock.lock();
+ try {
+ if (store.existDatabase(databaseName)) {
+ return ProtoUtil.TRUE;
+ } else {
+ return ProtoUtil.FALSE;
+ }
+ } catch (Exception e) {
+ LOG.error(e);
+ throw new ServiceException(e);
+ } finally {
+ rlock.unlock();
+ }
+ } else {
+ return ProtoUtil.TRUE;
+ }
+ }
+
+ @Override
+ public StringListProto getAllDatabaseNames(RpcController controller, NullProto request) throws ServiceException {
rlock.lock();
try {
- if (store.existDatabase(databaseName)) {
- return ProtoUtil.TRUE;
- } else {
- return ProtoUtil.FALSE;
- }
+ StringListProto.Builder builder = StringListProto.newBuilder();
+ builder.addAllValues(store.getAllDatabaseNames());
+ builder.addValues(metaDictionary.getSystemDatabaseName());
+ return builder.build();
} catch (Exception e) {
LOG.error(e);
throw new ServiceException(e);
@@ -446,14 +494,13 @@ public class CatalogServer extends AbstractService {
rlock.unlock();
}
}
-
+
@Override
- public StringListProto getAllDatabaseNames(RpcController controller, NullProto request) throws ServiceException {
+ public GetDatabasesProto getAllDatabases(RpcController controller, NullProto request) throws ServiceException {
rlock.lock();
try {
- return ProtoUtil.convertStrings(store.getAllDatabaseNames());
+ return GetDatabasesProto.newBuilder().addAllDatabase(store.getAllDatabases()).build();
} catch (Exception e) {
- LOG.error(e);
throw new ServiceException(e);
} finally {
rlock.unlock();
@@ -466,27 +513,31 @@ public class CatalogServer extends AbstractService {
String databaseName = request.getDatabaseName();
String tableName = request.getTableName();
- rlock.lock();
- try {
- boolean contain;
+ if (metaDictionary.isSystemDatabase(databaseName)){
+ return metaDictionary.getTableDesc(tableName);
+ } else {
+ rlock.lock();
+ try {
+ boolean contain;
- contain = store.existDatabase(databaseName);
+ contain = store.existDatabase(databaseName);
- if (contain) {
- contain = store.existTable(databaseName, tableName);
if (contain) {
- return store.getTable(databaseName, tableName);
+ contain = store.existTable(databaseName, tableName);
+ if (contain) {
+ return store.getTable(databaseName, tableName);
+ } else {
+ throw new NoSuchTableException(tableName);
+ }
} else {
- throw new NoSuchTableException(tableName);
+ throw new NoSuchDatabaseException(databaseName);
}
- } else {
- throw new NoSuchDatabaseException(databaseName);
+ } catch (Exception e) {
+ LOG.error(e);
+ throw new ServiceException(e);
+ } finally {
+ rlock.unlock();
}
- } catch (Exception e) {
- LOG.error(e);
- throw new ServiceException(e);
- } finally {
- rlock.unlock();
}
}
@@ -496,18 +547,22 @@ public class CatalogServer extends AbstractService {
String databaseName = request.getValue();
- rlock.lock();
- try {
- if (store.existDatabase(databaseName)) {
- return ProtoUtil.convertStrings(store.getAllTableNames(databaseName));
- } else {
- throw new NoSuchDatabaseException(databaseName);
+ if (metaDictionary.isSystemDatabase(databaseName)) {
+ return ProtoUtil.convertStrings(metaDictionary.getAllSystemTables());
+ } else {
+ rlock.lock();
+ try {
+ if (store.existDatabase(databaseName)) {
+ return ProtoUtil.convertStrings(store.getAllTableNames(databaseName));
+ } else {
+ throw new NoSuchDatabaseException(databaseName);
+ }
+ } catch (Exception e) {
+ LOG.error(e);
+ throw new ServiceException(e);
+ } finally {
+ rlock.unlock();
}
- } catch (Exception e) {
- LOG.error(e);
- throw new ServiceException(e);
- } finally {
- rlock.unlock();
}
}
@@ -532,6 +587,10 @@ public class CatalogServer extends AbstractService {
String databaseName = splitted[0];
String tableName = splitted[1];
+ if (metaDictionary.isSystemDatabase(databaseName)) {
+ throw new ServiceException(databaseName + " is a system database.");
+ }
+
wlock.lock();
try {
@@ -563,6 +622,10 @@ public class CatalogServer extends AbstractService {
String databaseName = request.getDatabaseName();
String tableName = request.getTableName();
+
+ if (metaDictionary.isSystemDatabase(databaseName)) {
+ throw new ServiceException(databaseName + " is a system database.");
+ }
wlock.lock();
try {
@@ -595,27 +658,83 @@ public class CatalogServer extends AbstractService {
String databaseName = request.getDatabaseName();
String tableName = request.getTableName();
- rlock.lock();
- try {
+ if (!metaDictionary.isSystemDatabase(databaseName)) {
+ rlock.lock();
+ try {
- boolean contain = store.existDatabase(databaseName);
+ boolean contain = store.existDatabase(databaseName);
- if (contain) {
- if (store.existTable(databaseName, tableName)) {
- return BOOL_TRUE;
+ if (contain) {
+ if (store.existTable(databaseName, tableName)) {
+ return BOOL_TRUE;
+ } else {
+ return BOOL_FALSE;
+ }
} else {
- return BOOL_FALSE;
+ throw new NoSuchDatabaseException(databaseName);
}
+ } catch (Exception e) {
+ LOG.error(e);
+ throw new ServiceException(e);
+ } finally {
+ rlock.unlock();
+ }
+ } else {
+ if (metaDictionary.existTable(tableName)) {
+ return BOOL_TRUE;
} else {
- throw new NoSuchDatabaseException(databaseName);
+ return BOOL_FALSE;
}
+ }
+
+ }
+
+ @Override
+ public GetTablesProto getAllTables(RpcController controller, NullProto request) throws ServiceException {
+ rlock.lock();
+ try {
+ return GetTablesProto.newBuilder().addAllTable(store.getAllTables()).build();
+ } catch (Exception e) {
+ throw new ServiceException(e);
+ } finally {
+ rlock.unlock();
+ }
+ }
+
+ @Override
+ public GetTableOptionsProto getAllTableOptions(RpcController controller, NullProto request) throws ServiceException {
+ rlock.lock();
+ try {
+ return GetTableOptionsProto.newBuilder().addAllTableOption(store.getAllTableOptions()).build();
+ } catch (Exception e) {
+ throw new ServiceException(e);
+ } finally {
+ rlock.unlock();
+ }
+ }
+
+ @Override
+ public GetTableStatsProto getAllTableStats(RpcController controller, NullProto request) throws ServiceException {
+ rlock.lock();
+ try {
+ return GetTableStatsProto.newBuilder().addAllStat(store.getAllTableStats()).build();
+ } catch (Exception e) {
+ throw new ServiceException(e);
+ } finally {
+ rlock.unlock();
+ }
+ }
+
+ @Override
+ public GetColumnsProto getAllColumns(RpcController controller, NullProto request) throws ServiceException {
+ rlock.lock();
+ try {
+ return GetColumnsProto.newBuilder().addAllColumn(store.getAllColumns()).build();
} catch (Exception e) {
- LOG.error(e);
throw new ServiceException(e);
} finally {
rlock.unlock();
}
-
}
@Override
@@ -625,6 +744,10 @@ public class CatalogServer extends AbstractService {
String databaseName = request.getDatabaseName();
String tableName = request.getTableName();
+ if (metaDictionary.isSystemDatabase(databaseName)) {
+ throw new ServiceException(databaseName + " is a system database. It does not contain any partitioned tables.");
+ }
+
rlock.lock();
try {
boolean contain;
@@ -658,6 +781,10 @@ public class CatalogServer extends AbstractService {
throws ServiceException {
String databaseName = request.getDatabaseName();
String tableName = request.getTableName();
+
+ if (metaDictionary.isSystemDatabase(databaseName)) {
+ throw new ServiceException(databaseName + " is a system database. Partition methods are not supported yet.");
+ }
rlock.lock();
try {
@@ -721,14 +848,28 @@ public class CatalogServer extends AbstractService {
throws ServiceException {
return null;
}
+
+ @Override
+ public GetTablePartitionsProto getAllPartitions(RpcController controller, NullProto request) throws ServiceException {
+ rlock.lock();
+ try {
+ return GetTablePartitionsProto.newBuilder().addAllPart(store.getAllPartitions()).build();
+ } catch (Exception e) {
+ throw new ServiceException(e);
+ } finally {
+ rlock.unlock();
+ }
+ }
@Override
public BoolProto createIndex(RpcController controller, IndexDescProto indexDesc)
throws ServiceException {
+ String databaseName = indexDesc.getTableIdentifier().getDatabaseName();
+
rlock.lock();
try {
if (store.existIndexByName(
- indexDesc.getTableIdentifier().getDatabaseName(),
+ databaseName,
indexDesc.getIndexName())) {
throw new AlreadyExistsIndexException(indexDesc.getIndexName());
}
@@ -847,6 +988,18 @@ public class CatalogServer extends AbstractService {
return BOOL_TRUE;
}
+
+ @Override
+ public GetIndexesProto getAllIndexes(RpcController controller, NullProto request) throws ServiceException {
+ rlock.lock();
+ try {
+ return GetIndexesProto.newBuilder().addAllIndex(store.getAllIndexes()).build();
+ } catch (Exception e) {
+ throw new ServiceException(e);
+ } finally {
+ rlock.unlock();
+ }
+ }
public boolean checkIfBuiltin(FunctionType type) {
return type == GENERAL || type == AGGREGATION || type == DISTINCT_AGGREGATION;
http://git-wip-us.apache.org/repos/asf/tajo/blob/021a6f0b/tajo-catalog/tajo-catalog-server/src/main/java/org/apache/tajo/catalog/dictionary/AbstractTableDescriptor.java
----------------------------------------------------------------------
diff --git a/tajo-catalog/tajo-catalog-server/src/main/java/org/apache/tajo/catalog/dictionary/AbstractTableDescriptor.java b/tajo-catalog/tajo-catalog-server/src/main/java/org/apache/tajo/catalog/dictionary/AbstractTableDescriptor.java
new file mode 100644
index 0000000..a8b384c
--- /dev/null
+++ b/tajo-catalog/tajo-catalog-server/src/main/java/org/apache/tajo/catalog/dictionary/AbstractTableDescriptor.java
@@ -0,0 +1,90 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.tajo.catalog.dictionary;
+
+import org.apache.tajo.catalog.CatalogUtil;
+import org.apache.tajo.catalog.proto.CatalogProtos.ColumnProto;
+import org.apache.tajo.catalog.proto.CatalogProtos.SchemaProto;
+import org.apache.tajo.catalog.proto.CatalogProtos.StoreType;
+import org.apache.tajo.catalog.proto.CatalogProtos.TableDescProto;
+import org.apache.tajo.catalog.proto.CatalogProtos.TableProto;
+import org.apache.tajo.catalog.proto.CatalogProtos.TableStatsProto;
+import org.apache.tajo.rpc.protocolrecords.PrimitiveProtos.KeyValueSetProto;
+
+abstract class AbstractTableDescriptor implements TableDescriptor {
+
+ protected InfoSchemaMetadataDictionary dictionary;
+
+ public AbstractTableDescriptor(InfoSchemaMetadataDictionary metadataDictionary) {
+ dictionary = metadataDictionary;
+ }
+
+ protected abstract ColumnDescriptor[] getColumnDescriptors();
+
+ protected SchemaProto getSchemaProto() {
+ SchemaProto.Builder schemaBuilder = SchemaProto.newBuilder();
+ ColumnProto.Builder columnBuilder = null;
+
+ for (ColumnDescriptor columnDescriptor: getColumnDescriptors()) {
+ columnBuilder = ColumnProto.newBuilder();
+
+ columnBuilder.setName(columnDescriptor.getName().toLowerCase());
+ if (columnDescriptor.getLength() > 0) {
+ columnBuilder.setDataType(CatalogUtil.newDataTypeWithLen(columnDescriptor.getType(),
+ columnDescriptor.getLength()));
+ } else {
+ columnBuilder.setDataType(CatalogUtil.newSimpleDataType(columnDescriptor.getType()));
+ }
+
+ schemaBuilder.addFields(columnBuilder.build());
+ }
+
+ return schemaBuilder.build();
+ }
+
+ protected TableProto getTableProto() {
+ TableProto.Builder metaBuilder = TableProto.newBuilder();
+ metaBuilder.setStoreType(StoreType.SYSTEM);
+ metaBuilder.setParams(KeyValueSetProto.newBuilder().build());
+ return metaBuilder.build();
+ }
+
+ protected TableStatsProto getTableStatsProto() {
+ TableStatsProto.Builder statBuilder = TableStatsProto.newBuilder();
+ statBuilder.setNumRows(0l);
+ statBuilder.setNumBytes(0l);
+ return statBuilder.build();
+ }
+
+ @Override
+ public TableDescProto getTableDescription() {
+ TableDescProto.Builder tableBuilder = TableDescProto.newBuilder();
+
+ tableBuilder.setTableName(CatalogUtil.buildFQName(dictionary.getSystemDatabaseName(), getTableNameString()));
+ tableBuilder.setPath(dictionary.getTablePath());
+
+ tableBuilder.setSchema(CatalogUtil.getQualfiedSchema(
+ dictionary.getSystemDatabaseName() + "." + getTableNameString(),
+ getSchemaProto()));
+ tableBuilder.setMeta(getTableProto());
+ tableBuilder.setStats(getTableStatsProto());
+ return tableBuilder.build();
+ }
+
+}
http://git-wip-us.apache.org/repos/asf/tajo/blob/021a6f0b/tajo-catalog/tajo-catalog-server/src/main/java/org/apache/tajo/catalog/dictionary/ColumnDescriptor.java
----------------------------------------------------------------------
diff --git a/tajo-catalog/tajo-catalog-server/src/main/java/org/apache/tajo/catalog/dictionary/ColumnDescriptor.java b/tajo-catalog/tajo-catalog-server/src/main/java/org/apache/tajo/catalog/dictionary/ColumnDescriptor.java
new file mode 100644
index 0000000..0b3928a
--- /dev/null
+++ b/tajo-catalog/tajo-catalog-server/src/main/java/org/apache/tajo/catalog/dictionary/ColumnDescriptor.java
@@ -0,0 +1,47 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.tajo.catalog.dictionary;
+
+import org.apache.tajo.common.TajoDataTypes.Type;
+
+class ColumnDescriptor {
+
+ private final String name;
+ private final Type type;
+ private final int length;
+
+ public ColumnDescriptor(String columnName, Type columnType, int dataLength) {
+ name = columnName;
+ type = columnType;
+ length = dataLength;
+ }
+
+ public String getName() {
+ return name;
+ }
+
+ public Type getType() {
+ return type;
+ }
+
+ public int getLength() {
+ return length;
+ }
+
+}
http://git-wip-us.apache.org/repos/asf/tajo/blob/021a6f0b/tajo-catalog/tajo-catalog-server/src/main/java/org/apache/tajo/catalog/dictionary/ColumnsTableDescriptor.java
----------------------------------------------------------------------
diff --git a/tajo-catalog/tajo-catalog-server/src/main/java/org/apache/tajo/catalog/dictionary/ColumnsTableDescriptor.java b/tajo-catalog/tajo-catalog-server/src/main/java/org/apache/tajo/catalog/dictionary/ColumnsTableDescriptor.java
new file mode 100644
index 0000000..85b8f20
--- /dev/null
+++ b/tajo-catalog/tajo-catalog-server/src/main/java/org/apache/tajo/catalog/dictionary/ColumnsTableDescriptor.java
@@ -0,0 +1,48 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.tajo.catalog.dictionary;
+
+import org.apache.tajo.common.TajoDataTypes.Type;
+
+class ColumnsTableDescriptor extends AbstractTableDescriptor {
+
+  /** Name of the virtual "columns" table in information_schema. */
+  private static final String TABLE_NAME = "columns";
+
+  // Fixed schema of the virtual table; order defines ordinal positions.
+  private final ColumnDescriptor[] columnDescriptors = {
+      new ColumnDescriptor("tid", Type.INT4, 0),
+      new ColumnDescriptor("column_name", Type.TEXT, 0),
+      new ColumnDescriptor("ordinal_position", Type.INT4, 0),
+      new ColumnDescriptor("data_type", Type.TEXT, 0),
+      new ColumnDescriptor("type_length", Type.INT4, 0)
+  };
+
+  public ColumnsTableDescriptor(InfoSchemaMetadataDictionary metadataDictionary) {
+    super(metadataDictionary);
+  }
+
+  @Override
+  public String getTableNameString() {
+    return TABLE_NAME;
+  }
+
+  @Override
+  protected ColumnDescriptor[] getColumnDescriptors() {
+    return columnDescriptors;
+  }
+
+}
http://git-wip-us.apache.org/repos/asf/tajo/blob/021a6f0b/tajo-catalog/tajo-catalog-server/src/main/java/org/apache/tajo/catalog/dictionary/DatabasesTableDescriptor.java
----------------------------------------------------------------------
diff --git a/tajo-catalog/tajo-catalog-server/src/main/java/org/apache/tajo/catalog/dictionary/DatabasesTableDescriptor.java b/tajo-catalog/tajo-catalog-server/src/main/java/org/apache/tajo/catalog/dictionary/DatabasesTableDescriptor.java
new file mode 100644
index 0000000..951f6b2
--- /dev/null
+++ b/tajo-catalog/tajo-catalog-server/src/main/java/org/apache/tajo/catalog/dictionary/DatabasesTableDescriptor.java
@@ -0,0 +1,47 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.tajo.catalog.dictionary;
+
+import org.apache.tajo.common.TajoDataTypes.Type;
+
+class DatabasesTableDescriptor extends AbstractTableDescriptor {
+
+  /** Name of the virtual "databases" table in information_schema. */
+  private static final String TABLE_NAME = "databases";
+
+  // Fixed schema of the virtual table; order defines ordinal positions.
+  private final ColumnDescriptor[] columnDescriptors = {
+      new ColumnDescriptor("db_id", Type.INT4, 0),
+      new ColumnDescriptor("db_name", Type.TEXT, 0),
+      new ColumnDescriptor("space_id", Type.INT4, 0)
+  };
+
+  public DatabasesTableDescriptor(InfoSchemaMetadataDictionary metadataDictionary) {
+    super(metadataDictionary);
+  }
+
+  @Override
+  public String getTableNameString() {
+    return TABLE_NAME;
+  }
+
+  @Override
+  protected ColumnDescriptor[] getColumnDescriptors() {
+    return columnDescriptors;
+  }
+
+}
http://git-wip-us.apache.org/repos/asf/tajo/blob/021a6f0b/tajo-catalog/tajo-catalog-server/src/main/java/org/apache/tajo/catalog/dictionary/IndexesTableDescriptor.java
----------------------------------------------------------------------
diff --git a/tajo-catalog/tajo-catalog-server/src/main/java/org/apache/tajo/catalog/dictionary/IndexesTableDescriptor.java b/tajo-catalog/tajo-catalog-server/src/main/java/org/apache/tajo/catalog/dictionary/IndexesTableDescriptor.java
new file mode 100644
index 0000000..a079a93
--- /dev/null
+++ b/tajo-catalog/tajo-catalog-server/src/main/java/org/apache/tajo/catalog/dictionary/IndexesTableDescriptor.java
@@ -0,0 +1,52 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.tajo.catalog.dictionary;
+
+import org.apache.tajo.common.TajoDataTypes.Type;
+
+class IndexesTableDescriptor extends AbstractTableDescriptor {
+
+  /** Name of the virtual "indexes" table in information_schema. */
+  private static final String TABLE_NAME = "indexes";
+
+  // Fixed schema of the virtual table; order defines ordinal positions.
+  private final ColumnDescriptor[] columnDescriptors = {
+      new ColumnDescriptor("db_id", Type.INT4, 0),
+      new ColumnDescriptor("tid", Type.INT4, 0),
+      new ColumnDescriptor("index_name", Type.TEXT, 0),
+      new ColumnDescriptor("column_name", Type.TEXT, 0),
+      new ColumnDescriptor("data_type", Type.TEXT, 0),
+      new ColumnDescriptor("index_type", Type.TEXT, 0),
+      new ColumnDescriptor("is_unique", Type.BOOLEAN, 0),
+      new ColumnDescriptor("is_clustered", Type.BOOLEAN, 0),
+      new ColumnDescriptor("is_ascending", Type.BOOLEAN, 0)
+  };
+
+  public IndexesTableDescriptor(InfoSchemaMetadataDictionary metadataDictionary) {
+    super(metadataDictionary);
+  }
+
+  @Override
+  public String getTableNameString() {
+    return TABLE_NAME;
+  }
+
+  @Override
+  protected ColumnDescriptor[] getColumnDescriptors() {
+    return columnDescriptors;
+  }
+
+}
http://git-wip-us.apache.org/repos/asf/tajo/blob/021a6f0b/tajo-catalog/tajo-catalog-server/src/main/java/org/apache/tajo/catalog/dictionary/InfoSchemaMetadataDictionary.java
----------------------------------------------------------------------
diff --git a/tajo-catalog/tajo-catalog-server/src/main/java/org/apache/tajo/catalog/dictionary/InfoSchemaMetadataDictionary.java b/tajo-catalog/tajo-catalog-server/src/main/java/org/apache/tajo/catalog/dictionary/InfoSchemaMetadataDictionary.java
new file mode 100644
index 0000000..de79caa
--- /dev/null
+++ b/tajo-catalog/tajo-catalog-server/src/main/java/org/apache/tajo/catalog/dictionary/InfoSchemaMetadataDictionary.java
@@ -0,0 +1,124 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.tajo.catalog.dictionary;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+
+import org.apache.tajo.catalog.exception.NoSuchTableException;
+import org.apache.tajo.catalog.proto.CatalogProtos;
+import org.apache.tajo.util.TUtil;
+
+public class InfoSchemaMetadataDictionary {
+
+  /** Name of the virtual system database exposing catalog metadata. */
+  private static final String DATABASE_NAME = "information_schema";
+
+  /** Virtual tables served by this dictionary; MAX_TABLE is a count sentinel. */
+  private static enum DEFINED_TABLES {
+    TABLESPACES,
+    DATABASES,
+    TABLES,
+    COLUMNS,
+    INDEXES,
+    TABLEOPTIONS,
+    TABLESTATS,
+    PARTITIONS,
+    MAX_TABLE;
+  }
+
+  // One descriptor per DEFINED_TABLES entry, indexed by enum ordinal.
+  private List<TableDescriptor> schemaInfoTableDescriptors = new ArrayList<TableDescriptor>(
+      Collections.nCopies(DEFINED_TABLES.MAX_TABLE.ordinal(), (TableDescriptor)null));
+
+  public InfoSchemaMetadataDictionary() {
+    createSystemTableDescriptors();
+  }
+
+  /** Populates every slot of the descriptor list; invoked once by the constructor. */
+  private void createSystemTableDescriptors() {
+    schemaInfoTableDescriptors.set(DEFINED_TABLES.TABLESPACES.ordinal(), new TablespacesTableDescriptor(this));
+    schemaInfoTableDescriptors.set(DEFINED_TABLES.DATABASES.ordinal(), new DatabasesTableDescriptor(this));
+    schemaInfoTableDescriptors.set(DEFINED_TABLES.TABLES.ordinal(), new TablesTableDescriptor(this));
+    schemaInfoTableDescriptors.set(DEFINED_TABLES.COLUMNS.ordinal(), new ColumnsTableDescriptor(this));
+    schemaInfoTableDescriptors.set(DEFINED_TABLES.INDEXES.ordinal(), new IndexesTableDescriptor(this));
+    schemaInfoTableDescriptors.set(DEFINED_TABLES.TABLEOPTIONS.ordinal(), new TableOptionsTableDescriptor(this));
+    schemaInfoTableDescriptors.set(DEFINED_TABLES.TABLESTATS.ordinal(), new TableStatsTableDescriptor(this));
+    schemaInfoTableDescriptors.set(DEFINED_TABLES.PARTITIONS.ordinal(), new PartitionsTableDescriptor(this));
+  }
+
+  /**
+   * Returns true when the given name refers to the information_schema database.
+   * The comparison is case-insensitive; null or empty names yield false.
+   */
+  public boolean isSystemDatabase(String databaseName) {
+    return databaseName != null && DATABASE_NAME.equalsIgnoreCase(databaseName);
+  }
+
+  public String getSystemDatabaseName() {
+    return DATABASE_NAME;
+  }
+
+  /** Returns the names of all virtual system tables, in definition order. */
+  public List<String> getAllSystemTables() {
+    List<String> systemTableNames = TUtil.newList();
+
+    for (TableDescriptor descriptor : schemaInfoTableDescriptors) {
+      systemTableNames.add(descriptor.getTableNameString());
+    }
+
+    return systemTableNames;
+  }
+
+  /**
+   * Looks up a descriptor by table name, case-insensitively.
+   *
+   * @return the matching descriptor, or null if no such virtual table exists
+   * @throws NoSuchTableException if tableName is null or empty
+   */
+  private TableDescriptor getTableDescriptor(String tableName) {
+    if (tableName == null || tableName.isEmpty()) {
+      throw new NoSuchTableException(tableName);
+    }
+
+    // equalsIgnoreCase already performs a case-insensitive compare, so the
+    // previous toUpperCase() copy was redundant and locale-sensitive
+    // (e.g. the Turkish dotted/dotless 'i' mapping).
+    for (TableDescriptor descriptor : schemaInfoTableDescriptors) {
+      if (descriptor.getTableNameString().equalsIgnoreCase(tableName)) {
+        return descriptor;
+      }
+    }
+
+    return null;
+  }
+
+  /**
+   * Returns the protobuf description of the named virtual table.
+   *
+   * @throws NoSuchTableException if the table is unknown, null, or empty
+   */
+  public CatalogProtos.TableDescProto getTableDesc(String tableName) {
+    TableDescriptor tableDescriptor = getTableDescriptor(tableName);
+    if (tableDescriptor == null) {
+      throw new NoSuchTableException(DATABASE_NAME, tableName);
+    }
+
+    return tableDescriptor.getTableDescription();
+  }
+
+  /** Returns whether the named virtual table exists (case-insensitive). */
+  public boolean existTable(String tableName) {
+    return getTableDescriptor(tableName) != null;
+  }
+
+  protected String getTablePath() {
+    return "SYSTEM";
+  }
+}
http://git-wip-us.apache.org/repos/asf/tajo/blob/021a6f0b/tajo-catalog/tajo-catalog-server/src/main/java/org/apache/tajo/catalog/dictionary/PartitionsTableDescriptor.java
----------------------------------------------------------------------
diff --git a/tajo-catalog/tajo-catalog-server/src/main/java/org/apache/tajo/catalog/dictionary/PartitionsTableDescriptor.java b/tajo-catalog/tajo-catalog-server/src/main/java/org/apache/tajo/catalog/dictionary/PartitionsTableDescriptor.java
new file mode 100644
index 0000000..d69c93e
--- /dev/null
+++ b/tajo-catalog/tajo-catalog-server/src/main/java/org/apache/tajo/catalog/dictionary/PartitionsTableDescriptor.java
@@ -0,0 +1,48 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.tajo.catalog.dictionary;
+
+import org.apache.tajo.common.TajoDataTypes.Type;
+
+class PartitionsTableDescriptor extends AbstractTableDescriptor {
+
+  /** Name of the virtual "partitions" table in information_schema. */
+  private static final String TABLE_NAME = "partitions";
+
+  // Fixed schema of the virtual table; order defines ordinal positions.
+  private final ColumnDescriptor[] columnDescriptors = {
+      new ColumnDescriptor("pid", Type.INT4, 0),
+      new ColumnDescriptor("tid", Type.INT4, 0),
+      new ColumnDescriptor("partition_name", Type.TEXT, 0),
+      new ColumnDescriptor("ordinal_position", Type.INT4, 0),
+      new ColumnDescriptor("path", Type.TEXT, 0)
+  };
+
+  public PartitionsTableDescriptor(InfoSchemaMetadataDictionary metadataDictionary) {
+    super(metadataDictionary);
+  }
+
+  @Override
+  public String getTableNameString() {
+    return TABLE_NAME;
+  }
+
+  @Override
+  protected ColumnDescriptor[] getColumnDescriptors() {
+    return columnDescriptors;
+  }
+
+}
http://git-wip-us.apache.org/repos/asf/tajo/blob/021a6f0b/tajo-catalog/tajo-catalog-server/src/main/java/org/apache/tajo/catalog/dictionary/TableDescriptor.java
----------------------------------------------------------------------
diff --git a/tajo-catalog/tajo-catalog-server/src/main/java/org/apache/tajo/catalog/dictionary/TableDescriptor.java b/tajo-catalog/tajo-catalog-server/src/main/java/org/apache/tajo/catalog/dictionary/TableDescriptor.java
new file mode 100644
index 0000000..4bfe4c1
--- /dev/null
+++ b/tajo-catalog/tajo-catalog-server/src/main/java/org/apache/tajo/catalog/dictionary/TableDescriptor.java
@@ -0,0 +1,29 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.tajo.catalog.dictionary;
+
+import org.apache.tajo.catalog.proto.CatalogProtos;
+
+/**
+ * A descriptor of one virtual table in the information_schema database.
+ * Implementations supply the table's name and its protobuf description.
+ */
+public interface TableDescriptor {
+
+  /** Returns the name of this virtual table. */
+  String getTableNameString();
+
+  /** Returns the protobuf TableDescProto describing this virtual table. */
+  CatalogProtos.TableDescProto getTableDescription();
+
+}
http://git-wip-us.apache.org/repos/asf/tajo/blob/021a6f0b/tajo-catalog/tajo-catalog-server/src/main/java/org/apache/tajo/catalog/dictionary/TableOptionsTableDescriptor.java
----------------------------------------------------------------------
diff --git a/tajo-catalog/tajo-catalog-server/src/main/java/org/apache/tajo/catalog/dictionary/TableOptionsTableDescriptor.java b/tajo-catalog/tajo-catalog-server/src/main/java/org/apache/tajo/catalog/dictionary/TableOptionsTableDescriptor.java
new file mode 100644
index 0000000..de08111
--- /dev/null
+++ b/tajo-catalog/tajo-catalog-server/src/main/java/org/apache/tajo/catalog/dictionary/TableOptionsTableDescriptor.java
@@ -0,0 +1,46 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.tajo.catalog.dictionary;
+
+import org.apache.tajo.common.TajoDataTypes.Type;
+
+class TableOptionsTableDescriptor extends AbstractTableDescriptor {
+
+  /** Name of the virtual "table_options" table in information_schema. */
+  private static final String TABLE_NAME = "table_options";
+
+  // Fixed schema of the virtual table; order defines ordinal positions.
+  private final ColumnDescriptor[] columnDescriptors = {
+      new ColumnDescriptor("tid", Type.INT4, 0),
+      new ColumnDescriptor("key_", Type.TEXT, 0),
+      new ColumnDescriptor("value_", Type.TEXT, 0)
+  };
+
+  public TableOptionsTableDescriptor(InfoSchemaMetadataDictionary metadataDictionary) {
+    super(metadataDictionary);
+  }
+
+  @Override
+  public String getTableNameString() {
+    return TABLE_NAME;
+  }
+
+  @Override
+  protected ColumnDescriptor[] getColumnDescriptors() {
+    return columnDescriptors;
+  }
+
+}
http://git-wip-us.apache.org/repos/asf/tajo/blob/021a6f0b/tajo-catalog/tajo-catalog-server/src/main/java/org/apache/tajo/catalog/dictionary/TableStatsTableDescriptor.java
----------------------------------------------------------------------
diff --git a/tajo-catalog/tajo-catalog-server/src/main/java/org/apache/tajo/catalog/dictionary/TableStatsTableDescriptor.java b/tajo-catalog/tajo-catalog-server/src/main/java/org/apache/tajo/catalog/dictionary/TableStatsTableDescriptor.java
new file mode 100644
index 0000000..39fd364
--- /dev/null
+++ b/tajo-catalog/tajo-catalog-server/src/main/java/org/apache/tajo/catalog/dictionary/TableStatsTableDescriptor.java
@@ -0,0 +1,46 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.tajo.catalog.dictionary;
+
+import org.apache.tajo.common.TajoDataTypes.Type;
+
+class TableStatsTableDescriptor extends AbstractTableDescriptor {
+
+  /** Name of the virtual "table_stats" table in information_schema. */
+  private static final String TABLE_NAME = "table_stats";
+
+  // Fixed schema of the virtual table; order defines ordinal positions.
+  private final ColumnDescriptor[] columnDescriptors = {
+      new ColumnDescriptor("tid", Type.INT4, 0),
+      new ColumnDescriptor("num_rows", Type.INT8, 0),
+      new ColumnDescriptor("num_bytes", Type.INT8, 0)
+  };
+
+  public TableStatsTableDescriptor(InfoSchemaMetadataDictionary metadataDictionary) {
+    super(metadataDictionary);
+  }
+
+  @Override
+  public String getTableNameString() {
+    return TABLE_NAME;
+  }
+
+  @Override
+  protected ColumnDescriptor[] getColumnDescriptors() {
+    return columnDescriptors;
+  }
+
+}
http://git-wip-us.apache.org/repos/asf/tajo/blob/021a6f0b/tajo-catalog/tajo-catalog-server/src/main/java/org/apache/tajo/catalog/dictionary/TablesTableDescriptor.java
----------------------------------------------------------------------
diff --git a/tajo-catalog/tajo-catalog-server/src/main/java/org/apache/tajo/catalog/dictionary/TablesTableDescriptor.java b/tajo-catalog/tajo-catalog-server/src/main/java/org/apache/tajo/catalog/dictionary/TablesTableDescriptor.java
new file mode 100644
index 0000000..7485248
--- /dev/null
+++ b/tajo-catalog/tajo-catalog-server/src/main/java/org/apache/tajo/catalog/dictionary/TablesTableDescriptor.java
@@ -0,0 +1,49 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.tajo.catalog.dictionary;
+
+import org.apache.tajo.common.TajoDataTypes.Type;
+
+class TablesTableDescriptor extends AbstractTableDescriptor {
+
+  /** Name of the virtual "tables" table in information_schema. */
+  private static final String TABLE_NAME = "tables";
+
+  // Fixed schema of the virtual table; order defines ordinal positions.
+  private final ColumnDescriptor[] columnDescriptors = {
+      new ColumnDescriptor("tid", Type.INT4, 0),
+      new ColumnDescriptor("db_id", Type.INT4, 0),
+      new ColumnDescriptor("table_name", Type.TEXT, 0),
+      new ColumnDescriptor("table_type", Type.TEXT, 0),
+      new ColumnDescriptor("path", Type.TEXT, 0),
+      new ColumnDescriptor("store_type", Type.TEXT, 0)
+  };
+
+  public TablesTableDescriptor(InfoSchemaMetadataDictionary metadataDictionary) {
+    super(metadataDictionary);
+  }
+
+  @Override
+  public String getTableNameString() {
+    return TABLE_NAME;
+  }
+
+  @Override
+  protected ColumnDescriptor[] getColumnDescriptors() {
+    return columnDescriptors;
+  }
+
+}
http://git-wip-us.apache.org/repos/asf/tajo/blob/021a6f0b/tajo-catalog/tajo-catalog-server/src/main/java/org/apache/tajo/catalog/dictionary/TablespacesTableDescriptor.java
----------------------------------------------------------------------
diff --git a/tajo-catalog/tajo-catalog-server/src/main/java/org/apache/tajo/catalog/dictionary/TablespacesTableDescriptor.java b/tajo-catalog/tajo-catalog-server/src/main/java/org/apache/tajo/catalog/dictionary/TablespacesTableDescriptor.java
new file mode 100644
index 0000000..4c21eb6
--- /dev/null
+++ b/tajo-catalog/tajo-catalog-server/src/main/java/org/apache/tajo/catalog/dictionary/TablespacesTableDescriptor.java
@@ -0,0 +1,48 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.tajo.catalog.dictionary;
+
+import org.apache.tajo.common.TajoDataTypes.Type;
+
+class TablespacesTableDescriptor extends AbstractTableDescriptor {
+
+  // NOTE(review): table name is singular "tablespace" unlike the plural names
+  // used by the sibling descriptors — confirm this is intentional.
+  private static final String TABLE_NAME = "tablespace";
+
+  // Fixed schema of the virtual table; order defines ordinal positions.
+  private final ColumnDescriptor[] columnDescriptors = {
+      new ColumnDescriptor("space_id", Type.INT4, 0),
+      new ColumnDescriptor("space_name", Type.TEXT, 0),
+      new ColumnDescriptor("space_handler", Type.TEXT, 0),
+      new ColumnDescriptor("space_uri", Type.TEXT, 0)
+  };
+
+  public TablespacesTableDescriptor(InfoSchemaMetadataDictionary metadataDictionary) {
+    super(metadataDictionary);
+  }
+
+  @Override
+  public String getTableNameString() {
+    return TABLE_NAME;
+  }
+
+  @Override
+  protected ColumnDescriptor[] getColumnDescriptors() {
+    return columnDescriptors;
+  }
+
+}
http://git-wip-us.apache.org/repos/asf/tajo/blob/021a6f0b/tajo-catalog/tajo-catalog-server/src/main/java/org/apache/tajo/catalog/store/AbstractDBStore.java
----------------------------------------------------------------------
diff --git a/tajo-catalog/tajo-catalog-server/src/main/java/org/apache/tajo/catalog/store/AbstractDBStore.java b/tajo-catalog/tajo-catalog-server/src/main/java/org/apache/tajo/catalog/store/AbstractDBStore.java
index c7d55eb..a239a92 100644
--- a/tajo-catalog/tajo-catalog-server/src/main/java/org/apache/tajo/catalog/store/AbstractDBStore.java
+++ b/tajo-catalog/tajo-catalog-server/src/main/java/org/apache/tajo/catalog/store/AbstractDBStore.java
@@ -38,12 +38,14 @@ import org.apache.tajo.exception.InternalException;
import org.apache.tajo.exception.UnimplementedException;
import org.apache.tajo.util.FileUtil;
import org.apache.tajo.util.Pair;
+import org.apache.tajo.util.TUtil;
import java.io.IOException;
import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
+import java.sql.Statement;
import java.util.*;
import static org.apache.tajo.catalog.proto.CatalogProtos.AlterTablespaceProto.AlterTablespaceCommand;
@@ -403,6 +405,37 @@ public abstract class AbstractDBStore extends CatalogConstants implements Catalo
return tablespaceNames;
}
+
+ /**
+ * Reads every row of the tablespaces catalog table and maps it to a
+ * TablespaceProto (id, name, handler, uri).
+ *
+ * @throws CatalogException wrapping any SQLException from the store
+ */
+ @Override
+ public List<TablespaceProto> getTablespaces() throws CatalogException {
+ Connection conn = null;
+ Statement stmt = null;
+ ResultSet resultSet = null;
+ List<TablespaceProto> tablespaces = TUtil.newList();
+
+ try {
+ String sql = "SELECT SPACE_ID, SPACE_NAME, SPACE_HANDLER, SPACE_URI FROM " + TB_SPACES ;
+ conn = getConnection();
+ stmt = conn.createStatement();
+ resultSet = stmt.executeQuery(sql);
+
+ // One proto per row; all four selected columns are set on the builder.
+ while (resultSet.next()) {
+ TablespaceProto.Builder builder = TablespaceProto.newBuilder();
+ builder.setId(resultSet.getInt("SPACE_ID"));
+ builder.setSpaceName(resultSet.getString("SPACE_NAME"));
+ builder.setHandler(resultSet.getString("SPACE_HANDLER"));
+ builder.setUri(resultSet.getString("SPACE_URI"));
+
+ tablespaces.add(builder.build());
+ }
+ return tablespaces;
+
+ } catch (SQLException se) {
+ throw new CatalogException(se);
+ } finally {
+ // Only stmt/resultSet are closed; conn from getConnection() is left
+ // open — presumably shared/pooled by the store. TODO confirm.
+ CatalogUtil.closeQuietly(stmt, resultSet);
+ }
+ }
@Override
public TablespaceProto getTablespace(String spaceName) throws CatalogException {
@@ -593,6 +626,38 @@ public abstract class AbstractDBStore extends CatalogConstants implements Catalo
return databaseNames;
}
+
+ /**
+ * Reads every row of the databases catalog table and maps it to a
+ * DatabaseProto (id, name, tablespace id).
+ *
+ * @throws CatalogException wrapping any SQLException from the store
+ */
+ @Override
+ public List<DatabaseProto> getAllDatabases() throws CatalogException {
+ Connection conn = null;
+ Statement stmt = null;
+ ResultSet resultSet = null;
+
+ List<DatabaseProto> databases = new ArrayList<DatabaseProto>();
+
+ try {
+ String sql = "SELECT DB_ID, DB_NAME, SPACE_ID FROM " + TB_DATABASES;
+
+ conn = getConnection();
+ stmt = conn.createStatement();
+ resultSet = stmt.executeQuery(sql);
+ while (resultSet.next()) {
+ DatabaseProto.Builder builder = DatabaseProto.newBuilder();
+
+ builder.setId(resultSet.getInt("DB_ID"));
+ builder.setName(resultSet.getString("DB_NAME"));
+ builder.setSpaceId(resultSet.getInt("SPACE_ID"));
+
+ databases.add(builder.build());
+ }
+ } catch (SQLException se) {
+ throw new CatalogException(se);
+ } finally {
+ // conn is intentionally not closed here; only stmt/resultSet are.
+ CatalogUtil.closeQuietly(stmt, resultSet);
+ }
+
+ return databases;
+ }
private static class TableSpaceInternal {
private final int spaceId;
@@ -1450,6 +1515,163 @@ public abstract class AbstractDBStore extends CatalogConstants implements Catalo
}
return tables;
}
+
+ /**
+ * Lists every table by joining tables, databases, and tablespaces so the
+ * tablespace URI is available for deriving BASE_TABLE paths.
+ *
+ * @throws CatalogException wrapping any SQLException from the store
+ */
+ @Override
+ public List<TableDescriptorProto> getAllTables() throws CatalogException {
+ Connection conn = null;
+ Statement stmt = null;
+ ResultSet resultSet = null;
+
+ List<TableDescriptorProto> tables = new ArrayList<TableDescriptorProto>();
+
+ try {
+ String sql = "SELECT t.TID, t.DB_ID, t." + COL_TABLES_NAME + ", t.TABLE_TYPE, t.PATH, t.STORE_TYPE, " +
+ " s.SPACE_URI FROM " + TB_TABLES + " t, " + TB_DATABASES + " d, " + TB_SPACES +
+ " s WHERE t.DB_ID = d.DB_ID AND d.SPACE_ID = s.SPACE_ID";
+
+ conn = getConnection();
+ stmt = conn.createStatement();
+ resultSet = stmt.executeQuery(sql);
+ while (resultSet.next()) {
+ TableDescriptorProto.Builder builder = TableDescriptorProto.newBuilder();
+
+ builder.setTid(resultSet.getInt("TID"));
+ builder.setDbId(resultSet.getInt("DB_ID"));
+ String tableName = resultSet.getString(COL_TABLES_NAME);
+ builder.setName(tableName);
+ String tableTypeString = resultSet.getString("TABLE_TYPE");
+ // valueOf throws IllegalArgumentException on an unrecognized type string.
+ TableType tableType = TableType.valueOf(tableTypeString);
+ builder.setTableType(tableTypeString);
+
+ // BASE_TABLE paths are derived from the tablespace URI; any other
+ // table type uses the PATH column stored with the table row.
+ if (tableType == TableType.BASE_TABLE) {
+ builder.setPath(resultSet.getString("SPACE_URI") + "/" + tableName);
+ } else {
+ builder.setPath(resultSet.getString("PATH"));
+ }
+ String storeType = resultSet.getString("STORE_TYPE");
+ if (storeType != null) {
+ // trim() strips trailing padding — presumably a CHAR column; confirm.
+ storeType = storeType.trim();
+ builder.setStoreType(storeType);
+ }
+
+ tables.add(builder.build());
+ }
+ } catch (SQLException se) {
+ throw new CatalogException(se);
+ } finally {
+ CatalogUtil.closeQuietly(stmt, resultSet);
+ }
+
+ return tables;
+ }
+
+ /**
+ * Reads every (tid, key_, value_) row of the table-options catalog table
+ * and maps it to a TableOptionProto carrying a KeyValueProto.
+ *
+ * @throws CatalogException wrapping any SQLException from the store
+ */
+ @Override
+ public List<TableOptionProto> getAllTableOptions() throws CatalogException {
+ Connection conn = null;
+ Statement stmt = null;
+ ResultSet resultSet = null;
+
+ List<TableOptionProto> options = new ArrayList<TableOptionProto>();
+
+ try {
+ String sql = "SELECT tid, key_, value_ FROM " + TB_OPTIONS;
+
+ conn = getConnection();
+ stmt = conn.createStatement();
+ resultSet = stmt.executeQuery(sql);
+ while (resultSet.next()) {
+ TableOptionProto.Builder builder = TableOptionProto.newBuilder();
+
+ // NOTE(review): SQL selects lowercase labels but getters use uppercase;
+ // relies on case-insensitive JDBC column-label lookup — confirm for
+ // every supported driver.
+ builder.setTid(resultSet.getInt("TID"));
+
+ KeyValueProto.Builder keyValueBuilder = KeyValueProto.newBuilder();
+ keyValueBuilder.setKey(resultSet.getString("KEY_"));
+ keyValueBuilder.setValue(resultSet.getString("VALUE_"));
+ builder.setKeyval(keyValueBuilder.build());
+
+ options.add(builder.build());
+ }
+ } catch (SQLException se) {
+ throw new CatalogException(se);
+ } finally {
+ CatalogUtil.closeQuietly(stmt, resultSet);
+ }
+
+ return options;
+ }
+
+  /**
+   * Reads every row of the statistics catalog table and maps it to a
+   * TableStatsProto (tid, row count, byte count).
+   *
+   * @throws CatalogException wrapping any SQLException from the store
+   */
+  @Override
+  public List<TableStatsProto> getAllTableStats() throws CatalogException {
+    Connection connection = null;
+    Statement statement = null;
+    ResultSet res = null;
+
+    List<TableStatsProto> statsList = new ArrayList<TableStatsProto>();
+
+    try {
+      String query = "SELECT tid, num_rows, num_bytes FROM " + TB_STATISTICS;
+
+      connection = getConnection();
+      statement = connection.createStatement();
+      res = statement.executeQuery(query);
+
+      while (res.next()) {
+        TableStatsProto.Builder statsBuilder = TableStatsProto.newBuilder();
+        statsBuilder.setTid(res.getInt("TID"));
+        statsBuilder.setNumRows(res.getLong("NUM_ROWS"));
+        statsBuilder.setNumBytes(res.getLong("NUM_BYTES"));
+        statsList.add(statsBuilder.build());
+      }
+    } catch (SQLException se) {
+      throw new CatalogException(se);
+    } finally {
+      // The connection is shared; only the statement and result set are closed.
+      CatalogUtil.closeQuietly(statement, res);
+    }
+
+    return statsList;
+  }
+
+ /**
+ * Reads every column row ordered by (TID, ORDINAL_POSITION) so callers can
+ * group consecutive rows into per-table schemas, and maps each row to a
+ * ColumnProto with either a plain or length-qualified data type.
+ *
+ * @throws CatalogException wrapping any SQLException from the store
+ */
+ @Override
+ public List<ColumnProto> getAllColumns() throws CatalogException {
+ Connection conn = null;
+ Statement stmt = null;
+ ResultSet resultSet = null;
+
+ List<ColumnProto> columns = new ArrayList<ColumnProto>();
+
+ try {
+ String sql = "SELECT TID, COLUMN_NAME, ORDINAL_POSITION, DATA_TYPE, TYPE_LENGTH FROM " + TB_COLUMNS +
+ " ORDER BY TID ASC, ORDINAL_POSITION ASC";
+
+ conn = getConnection();
+ stmt = conn.createStatement();
+ resultSet = stmt.executeQuery(sql);
+ while (resultSet.next()) {
+ ColumnProto.Builder builder = ColumnProto.newBuilder();
+
+ builder.setTid(resultSet.getInt("TID"));
+ builder.setName(resultSet.getString("COLUMN_NAME"));
+
+ // DATA_TYPE is trimmed before lookup — presumably a padded CHAR column.
+ Type type = getDataType(resultSet.getString("DATA_TYPE").trim());
+ int typeLength = resultSet.getInt("TYPE_LENGTH");
+
+ // A positive TYPE_LENGTH selects the length-carrying data-type form.
+ if (typeLength > 0) {
+ builder.setDataType(CatalogUtil.newDataTypeWithLen(type, typeLength));
+ } else {
+ builder.setDataType(CatalogUtil.newSimpleDataType(type));
+ }
+
+ columns.add(builder.build());
+ }
+ } catch (SQLException se) {
+ throw new CatalogException(se);
+ } finally {
+ CatalogUtil.closeQuietly(stmt, resultSet);
+ }
+
+ return columns;
+ }
private static final String ADD_PARTITION_SQL =
"INSERT INTO " + TB_PARTTIONS + " (TID, PARTITION_NAME, ORDINAL_POSITION, PATH) VALUES (?,?,?,?)";
@@ -1705,6 +1927,40 @@ public abstract class AbstractDBStore extends CatalogConstants implements Catalo
CatalogUtil.closeQuietly(pstmt);
}
}
+
+ /**
+ * Loads every table partition recorded in the partitions catalog table
+ * (TB_PARTTIONS — constant name spelled as-is in CatalogConstants) and
+ * returns each row as a TablePartitionProto.
+ *
+ * @return one TablePartitionProto per partition row; empty if none exist
+ * @throws CatalogException wrapping any SQLException raised by the query
+ */
+ @Override
+ public List<TablePartitionProto> getAllPartitions() throws CatalogException {
+ Connection conn = null;
+ Statement stmt = null;
+ ResultSet resultSet = null;
+
+ List<TablePartitionProto> partitions = new ArrayList<TablePartitionProto>();
+
+ try {
+ String sql = "SELECT PID, TID, PARTITION_NAME, ORDINAL_POSITION, PATH FROM " + TB_PARTTIONS;
+
+ conn = getConnection();
+ stmt = conn.createStatement();
+ resultSet = stmt.executeQuery(sql);
+ while (resultSet.next()) {
+ TablePartitionProto.Builder builder = TablePartitionProto.newBuilder();
+
+ builder.setPid(resultSet.getInt("PID"));
+ builder.setTid(resultSet.getInt("TID"));
+ builder.setPartitionName(resultSet.getString("PARTITION_NAME"));
+ builder.setOrdinalPosition(resultSet.getInt("ORDINAL_POSITION"));
+ builder.setPath(resultSet.getString("PATH"));
+
+ partitions.add(builder.build());
+ }
+ } catch (SQLException se) {
+ throw new CatalogException(se);
+ } finally {
+ // Connection deliberately left open; only statement/result set are released.
+ CatalogUtil.closeQuietly(stmt, resultSet);
+ }
+
+ return partitions;
+ }
@Override
@@ -1984,6 +2240,45 @@ public abstract class AbstractDBStore extends CatalogConstants implements Catalo
return protos.toArray(new IndexDescProto[protos.size()]);
}
+
+ /**
+ * Loads every index definition stored in the TB_INDEXES catalog table and
+ * returns each row as an IndexProto. The database and table primary-key
+ * column names come from the COL_DATABASES_PK / COL_TABLES_PK constants,
+ * which are also used to read the values back out of the result set.
+ *
+ * NOTE(review): DATA_TYPE and INDEX_TYPE are carried as raw strings in the
+ * proto here (no parsing into enum types) — presumably decoded by callers;
+ * confirm against IndexProto's schema.
+ *
+ * @return one IndexProto per index row; empty if no indexes are defined
+ * @throws CatalogException wrapping any SQLException raised by the query
+ */
+ @Override
+ public List<IndexProto> getAllIndexes() throws CatalogException {
+ Connection conn = null;
+ Statement stmt = null;
+ ResultSet resultSet = null;
+
+ List<IndexProto> indexes = new ArrayList<IndexProto>();
+
+ try {
+ String sql = "SELECT " + COL_DATABASES_PK + ", " + COL_TABLES_PK + ", INDEX_NAME, " +
+ "COLUMN_NAME, DATA_TYPE, INDEX_TYPE, IS_UNIQUE, IS_CLUSTERED, IS_ASCENDING FROM " + TB_INDEXES;
+
+ conn = getConnection();
+ stmt = conn.createStatement();
+ resultSet = stmt.executeQuery(sql);
+ while (resultSet.next()) {
+ IndexProto.Builder builder = IndexProto.newBuilder();
+
+ builder.setDbId(resultSet.getInt(COL_DATABASES_PK));
+ builder.setTId(resultSet.getInt(COL_TABLES_PK));
+ builder.setIndexName(resultSet.getString("INDEX_NAME"));
+ builder.setColumnName(resultSet.getString("COLUMN_NAME"));
+ builder.setDataType(resultSet.getString("DATA_TYPE"));
+ builder.setIndexType(resultSet.getString("INDEX_TYPE"));
+ builder.setIsUnique(resultSet.getBoolean("IS_UNIQUE"));
+ builder.setIsClustered(resultSet.getBoolean("IS_CLUSTERED"));
+ builder.setIsAscending(resultSet.getBoolean("IS_ASCENDING"));
+
+ indexes.add(builder.build());
+ }
+ } catch (SQLException se) {
+ throw new CatalogException(se);
+ } finally {
+ CatalogUtil.closeQuietly(stmt, resultSet);
+ }
+
+ return indexes;
+ }
private void resultToIndexDescProtoBuilder(IndexDescProto.Builder builder,
final ResultSet res) throws SQLException {
http://git-wip-us.apache.org/repos/asf/tajo/blob/021a6f0b/tajo-catalog/tajo-catalog-server/src/main/java/org/apache/tajo/catalog/store/CatalogStore.java
----------------------------------------------------------------------
diff --git a/tajo-catalog/tajo-catalog-server/src/main/java/org/apache/tajo/catalog/store/CatalogStore.java b/tajo-catalog/tajo-catalog-server/src/main/java/org/apache/tajo/catalog/store/CatalogStore.java
index 041fc52..ed6fedc 100644
--- a/tajo-catalog/tajo-catalog-server/src/main/java/org/apache/tajo/catalog/store/CatalogStore.java
+++ b/tajo-catalog/tajo-catalog-server/src/main/java/org/apache/tajo/catalog/store/CatalogStore.java
@@ -20,9 +20,17 @@ package org.apache.tajo.catalog.store;
import org.apache.tajo.catalog.FunctionDesc;
import org.apache.tajo.catalog.proto.CatalogProtos;
+import org.apache.tajo.catalog.proto.CatalogProtos.ColumnProto;
+import org.apache.tajo.catalog.proto.CatalogProtos.DatabaseProto;
import org.apache.tajo.catalog.proto.CatalogProtos.IndexDescProto;
+import org.apache.tajo.catalog.proto.CatalogProtos.IndexProto;
+import org.apache.tajo.catalog.proto.CatalogProtos.TableDescriptorProto;
+import org.apache.tajo.catalog.proto.CatalogProtos.TableOptionProto;
+import org.apache.tajo.catalog.proto.CatalogProtos.TablePartitionProto;
+import org.apache.tajo.catalog.proto.CatalogProtos.TableStatsProto;
import java.io.Closeable;
+
import org.apache.tajo.catalog.exception.CatalogException;
import java.util.Collection;
@@ -41,6 +49,8 @@ public interface CatalogStore extends Closeable {
void dropTablespace(String spaceName) throws CatalogException;
Collection<String> getAllTablespaceNames() throws CatalogException;
+
+ List<TablespaceProto> getTablespaces() throws CatalogException;
TablespaceProto getTablespace(String spaceName) throws CatalogException;
@@ -54,6 +64,8 @@ public interface CatalogStore extends Closeable {
void dropDatabase(String databaseName) throws CatalogException;
Collection<String> getAllDatabaseNames() throws CatalogException;
+
+ List<DatabaseProto> getAllDatabases() throws CatalogException;
/*************************** TABLE ******************************/
void createTable(CatalogProtos.TableDescProto desc) throws CatalogException;
@@ -67,6 +79,14 @@ public interface CatalogStore extends Closeable {
List<String> getAllTableNames(String databaseName) throws CatalogException;
void alterTable(CatalogProtos.AlterTableDescProto alterTableDescProto) throws CatalogException;
+
+ List<TableDescriptorProto> getAllTables() throws CatalogException;
+
+ List<TableOptionProto> getAllTableOptions() throws CatalogException;
+
+ List<TableStatsProto> getAllTableStats() throws CatalogException;
+
+ List<ColumnProto> getAllColumns() throws CatalogException;
void updateTableStats(CatalogProtos.UpdateTableStatsProto statsProto) throws CatalogException;
@@ -100,6 +120,8 @@ public interface CatalogStore extends Closeable {
void delPartition(String partitionName) throws CatalogException;
void dropPartitions(String tableName) throws CatalogException;
+
+ List<TablePartitionProto> getAllPartitions() throws CatalogException;
/**************************** INDEX *******************************/
void createIndex(IndexDescProto proto) throws CatalogException;
@@ -117,6 +139,8 @@ public interface CatalogStore extends Closeable {
throws CatalogException;
IndexDescProto [] getIndexes(String databaseName, String tableName) throws CatalogException;
+
+ List<IndexProto> getAllIndexes() throws CatalogException;
/************************** FUNCTION *****************************/
[3/8] tajo git commit: TAJO-269: Protocol buffer De/Serialization for
LogicalNode.
Posted by ji...@apache.org.
http://git-wip-us.apache.org/repos/asf/tajo/blob/32be38d4/tajo-core/src/test/java/org/apache/tajo/engine/query/TestSelectQuery.java
----------------------------------------------------------------------
diff --git a/tajo-core/src/test/java/org/apache/tajo/engine/query/TestSelectQuery.java b/tajo-core/src/test/java/org/apache/tajo/engine/query/TestSelectQuery.java
index 2d9b1f8..0df4001 100644
--- a/tajo-core/src/test/java/org/apache/tajo/engine/query/TestSelectQuery.java
+++ b/tajo-core/src/test/java/org/apache/tajo/engine/query/TestSelectQuery.java
@@ -26,18 +26,18 @@ import org.apache.tajo.catalog.Schema;
import org.apache.tajo.catalog.TableDesc;
import org.apache.tajo.client.QueryStatus;
import org.apache.tajo.common.TajoDataTypes.Type;
+import org.apache.tajo.conf.TajoConf;
import org.apache.tajo.conf.TajoConf.ConfVars;
-import org.apache.tajo.engine.utils.test.ErrorInjectionRewriter;
import org.apache.tajo.jdbc.TajoResultSet;
+import org.apache.tajo.plan.rewrite.BaseLogicalPlanRewriteRuleProvider;
+import org.apache.tajo.plan.rewrite.LogicalPlanRewriteRule;
import org.apache.tajo.storage.StorageConstants;
import org.apache.tajo.util.KeyValueSet;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import java.sql.ResultSet;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.TimeZone;
+import java.util.*;
import static org.apache.tajo.TajoConstants.DEFAULT_DATABASE_NAME;
import static org.junit.Assert.*;
@@ -416,11 +416,23 @@ public class TestSelectQuery extends QueryTestCaseBase {
cleanupQuery(res);
}
+ public static class RulesForErrorInjection extends BaseLogicalPlanRewriteRuleProvider {
+ public RulesForErrorInjection(TajoConf conf) {
+ super(conf);
+ }
+
+ @Override
+ public Collection<Class<? extends LogicalPlanRewriteRule>> getPostRules() {
+ List<Class<? extends LogicalPlanRewriteRule>> addedRules = Lists.newArrayList(super.getPostRules());
+ return addedRules;
+ }
+ }
+
@Test
public final void testQueryMasterTaskInitError() throws Exception {
// In this testcase we can check that a TajoClient receives QueryMasterTask's init error message.
- testingCluster.setAllWorkersConfValue("tajo.plan.rewriter.classes",
- ErrorInjectionRewriter.class.getCanonicalName());
+ testingCluster.setAllWorkersConfValue(ConfVars.LOGICAL_PLAN_REWRITE_RULE_PROVIDER_CLASS.name(),
+ RulesForErrorInjection.class.getCanonicalName());
try {
// If client can't receive error status, thread runs forever.
@@ -450,7 +462,8 @@ public class TestSelectQuery extends QueryTestCaseBase {
// If query runs more than 10 secs, test is fail.
assertFalse(t.isAlive());
} finally {
- testingCluster.setAllWorkersConfValue("tajo.plan.rewriter.classes", "");
+ // recover the rewrite rule provider to default
+ testingCluster.setAllWorkersConfValue(ConfVars.LOGICAL_PLAN_REWRITE_RULE_PROVIDER_CLASS.name(), "");
}
}
http://git-wip-us.apache.org/repos/asf/tajo/blob/32be38d4/tajo-core/src/test/java/org/apache/tajo/engine/query/TestTruncateTable.java
----------------------------------------------------------------------
diff --git a/tajo-core/src/test/java/org/apache/tajo/engine/query/TestTruncateTable.java b/tajo-core/src/test/java/org/apache/tajo/engine/query/TestTruncateTable.java
index 455213b..1be21e4 100644
--- a/tajo-core/src/test/java/org/apache/tajo/engine/query/TestTruncateTable.java
+++ b/tajo-core/src/test/java/org/apache/tajo/engine/query/TestTruncateTable.java
@@ -25,8 +25,10 @@ import org.junit.Test;
import org.junit.experimental.categories.Category;
import java.sql.ResultSet;
+import java.util.List;
import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.fail;
@Category(IntegrationTest.class)
public class TestTruncateTable extends QueryTestCaseBase {
@@ -63,11 +65,6 @@ public class TestTruncateTable extends QueryTestCaseBase {
}
}
-
- /*
- Currently TajoClient can't throw exception when plan error.
- The following test cast should be uncommented after https://issues.apache.org/jira/browse/TAJO-762
-
@Test
public final void testTruncateExternalTable() throws Exception {
try {
@@ -100,5 +97,4 @@ public class TestTruncateTable extends QueryTestCaseBase {
executeString("DROP TABLE truncate_table2 PURGE");
}
}
- */
}
http://git-wip-us.apache.org/repos/asf/tajo/blob/32be38d4/tajo-core/src/test/java/org/apache/tajo/engine/query/TestWindowQuery.java
----------------------------------------------------------------------
diff --git a/tajo-core/src/test/java/org/apache/tajo/engine/query/TestWindowQuery.java b/tajo-core/src/test/java/org/apache/tajo/engine/query/TestWindowQuery.java
index 2af5ce9..668ba70 100644
--- a/tajo-core/src/test/java/org/apache/tajo/engine/query/TestWindowQuery.java
+++ b/tajo-core/src/test/java/org/apache/tajo/engine/query/TestWindowQuery.java
@@ -272,7 +272,8 @@ public class TestWindowQuery extends QueryTestCaseBase {
TajoTestingCluster.createTable("firstvaluetime", schema, tableOptions, data, 1);
try {
- ResultSet res = executeString("select id, first_value(time) over ( partition by id order by time ) as time_first from firstvaluetime");
+ ResultSet res = executeString(
+ "select id, first_value(time) over ( partition by id order by time ) as time_first from firstvaluetime");
String ascExpected = "id,time_first\n" +
"-------------------------------\n" +
"1,12:11:12\n" +
@@ -306,7 +307,8 @@ public class TestWindowQuery extends QueryTestCaseBase {
TajoTestingCluster.createTable("lastvaluetime", schema, tableOptions, data, 1);
try {
- ResultSet res = executeString("select id, last_value(time) over ( partition by id order by time ) as time_last from lastvaluetime");
+ ResultSet res = executeString(
+ "select id, last_value(time) over ( partition by id order by time ) as time_last from lastvaluetime");
String ascExpected = "id,time_last\n" +
"-------------------------------\n" +
"1,12:11:12\n" +
http://git-wip-us.apache.org/repos/asf/tajo/blob/32be38d4/tajo-core/src/test/java/org/apache/tajo/master/TestGlobalPlanner.java
----------------------------------------------------------------------
diff --git a/tajo-core/src/test/java/org/apache/tajo/master/TestGlobalPlanner.java b/tajo-core/src/test/java/org/apache/tajo/master/TestGlobalPlanner.java
index c908737..d0f7cf4 100644
--- a/tajo-core/src/test/java/org/apache/tajo/master/TestGlobalPlanner.java
+++ b/tajo-core/src/test/java/org/apache/tajo/master/TestGlobalPlanner.java
@@ -126,9 +126,9 @@ public class TestGlobalPlanner {
private MasterPlan buildPlan(String sql) throws PlanningException, IOException {
Expr expr = sqlAnalyzer.parse(sql);
- LogicalPlan plan = planner.createPlan(LocalTajoTestingUtility.createDummyContext(util.getConfiguration()), expr);
- optimizer.optimize(plan);
- QueryContext context = new QueryContext(util.getConfiguration());
+ QueryContext context = LocalTajoTestingUtility.createDummyContext(util.getConfiguration());
+ LogicalPlan plan = planner.createPlan(context, expr);
+ optimizer.optimize(context, plan);
MasterPlan masterPlan = new MasterPlan(LocalTajoTestingUtility.newQueryId(), context, plan);
globalPlanner.build(masterPlan);
return masterPlan;
http://git-wip-us.apache.org/repos/asf/tajo/blob/32be38d4/tajo-plan/src/main/java/org/apache/tajo/plan/LogicalOptimizer.java
----------------------------------------------------------------------
diff --git a/tajo-plan/src/main/java/org/apache/tajo/plan/LogicalOptimizer.java b/tajo-plan/src/main/java/org/apache/tajo/plan/LogicalOptimizer.java
index 750e64e..18a8859 100644
--- a/tajo-plan/src/main/java/org/apache/tajo/plan/LogicalOptimizer.java
+++ b/tajo-plan/src/main/java/org/apache/tajo/plan/LogicalOptimizer.java
@@ -18,16 +18,19 @@
package org.apache.tajo.plan;
+import com.google.common.annotations.VisibleForTesting;
import com.google.common.collect.Lists;
import com.google.common.collect.Sets;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.tajo.ConfigKey;
import org.apache.tajo.OverridableConf;
import org.apache.tajo.SessionVars;
import org.apache.tajo.algebra.JoinType;
import org.apache.tajo.conf.TajoConf;
import org.apache.tajo.conf.TajoConf.ConfVars;
+import org.apache.tajo.util.ReflectionUtil;
import org.apache.tajo.util.graph.DirectedGraphCursor;
import org.apache.tajo.plan.expr.AlgebraicUtil;
import org.apache.tajo.plan.expr.EvalNode;
@@ -37,14 +40,10 @@ import org.apache.tajo.plan.joinorder.JoinGraph;
import org.apache.tajo.plan.joinorder.JoinOrderAlgorithm;
import org.apache.tajo.plan.logical.*;
import org.apache.tajo.plan.rewrite.*;
-import org.apache.tajo.plan.rewrite.rules.FilterPushDownRule;
-import org.apache.tajo.plan.rewrite.rules.PartitionedTableRewriter;
-import org.apache.tajo.plan.rewrite.rules.ProjectionPushDownRule;
import org.apache.tajo.plan.util.PlannerUtil;
import org.apache.tajo.plan.visitor.BasicLogicalPlanVisitor;
import java.util.LinkedHashSet;
-import java.util.List;
import java.util.Set;
import java.util.Stack;
@@ -58,47 +57,36 @@ import static org.apache.tajo.plan.joinorder.GreedyHeuristicJoinOrderAlgorithm.g
public class LogicalOptimizer {
private static final Log LOG = LogFactory.getLog(LogicalOptimizer.class.getName());
- private BasicQueryRewriteEngine rulesBeforeJoinOpt;
- private BasicQueryRewriteEngine rulesAfterToJoinOpt;
+ private BaseLogicalPlanRewriteEngine rulesBeforeJoinOpt;
+ private BaseLogicalPlanRewriteEngine rulesAfterToJoinOpt;
private JoinOrderAlgorithm joinOrderAlgorithm = new GreedyHeuristicJoinOrderAlgorithm();
- public LogicalOptimizer(TajoConf systemConf) {
- rulesBeforeJoinOpt = new BasicQueryRewriteEngine();
- if (systemConf.getBoolVar(ConfVars.$TEST_FILTER_PUSHDOWN_ENABLED)) {
- rulesBeforeJoinOpt.addRewriteRule(new FilterPushDownRule());
- }
+ public LogicalOptimizer(TajoConf conf) {
- rulesAfterToJoinOpt = new BasicQueryRewriteEngine();
- rulesAfterToJoinOpt.addRewriteRule(new ProjectionPushDownRule());
- rulesAfterToJoinOpt.addRewriteRule(new PartitionedTableRewriter(systemConf));
-
- // Currently, it is only used for some test cases to inject exception manually.
- String userDefinedRewriterClass = systemConf.get("tajo.plan.rewriter.classes");
- if (userDefinedRewriterClass != null && !userDefinedRewriterClass.isEmpty()) {
- for (String eachRewriterClass : userDefinedRewriterClass.split(",")) {
- try {
- RewriteRule rule = (RewriteRule) Class.forName(eachRewriterClass).newInstance();
- rulesAfterToJoinOpt.addRewriteRule(rule);
- } catch (Exception e) {
- LOG.error("Can't initiate a Rewriter object: " + eachRewriterClass, e);
- continue;
- }
- }
- }
+ Class clazz = conf.getClassVar(ConfVars.LOGICAL_PLAN_REWRITE_RULE_PROVIDER_CLASS);
+ LogicalPlanRewriteRuleProvider provider = (LogicalPlanRewriteRuleProvider) ReflectionUtil.newInstance(clazz, conf);
+
+ rulesBeforeJoinOpt = new BaseLogicalPlanRewriteEngine();
+ rulesBeforeJoinOpt.addRewriteRule(provider.getPreRules());
+ rulesAfterToJoinOpt = new BaseLogicalPlanRewriteEngine();
+ rulesAfterToJoinOpt.addRewriteRule(provider.getPostRules());
}
- public void addRuleAfterToJoinOpt(RewriteRule rewriteRule) {
+ public void addRuleAfterToJoinOpt(LogicalPlanRewriteRule rewriteRule) {
if (rewriteRule != null) {
rulesAfterToJoinOpt.addRewriteRule(rewriteRule);
}
}
+ @VisibleForTesting
public LogicalNode optimize(LogicalPlan plan) throws PlanningException {
- return optimize(null, plan);
+ OverridableConf conf = new OverridableConf(new TajoConf(),
+ ConfigKey.ConfigType.SESSION, ConfigKey.ConfigType.QUERY, ConfigKey.ConfigType.SYSTEM);
+ return optimize(conf, plan);
}
public LogicalNode optimize(OverridableConf context, LogicalPlan plan) throws PlanningException {
- rulesBeforeJoinOpt.rewrite(plan);
+ rulesBeforeJoinOpt.rewrite(context, plan);
DirectedGraphCursor<String, BlockEdge> blockCursor =
new DirectedGraphCursor<String, BlockEdge>(plan.getQueryBlockGraph(), plan.getRootBlock().getName());
@@ -111,7 +99,7 @@ public class LogicalOptimizer {
} else {
LOG.info("Skip Join Optimized.");
}
- rulesAfterToJoinOpt.rewrite(plan);
+ rulesAfterToJoinOpt.rewrite(context, plan);
return plan.getRootBlock().getRoot();
}
http://git-wip-us.apache.org/repos/asf/tajo/blob/32be38d4/tajo-plan/src/main/java/org/apache/tajo/plan/LogicalPlanPreprocessor.java
----------------------------------------------------------------------
diff --git a/tajo-plan/src/main/java/org/apache/tajo/plan/LogicalPlanPreprocessor.java b/tajo-plan/src/main/java/org/apache/tajo/plan/LogicalPlanPreprocessor.java
index 7c29099..544f83a 100644
--- a/tajo-plan/src/main/java/org/apache/tajo/plan/LogicalPlanPreprocessor.java
+++ b/tajo-plan/src/main/java/org/apache/tajo/plan/LogicalPlanPreprocessor.java
@@ -112,7 +112,7 @@ public class LogicalPlanPreprocessor extends BaseAlgebraVisitor<LogicalPlanner.P
throw new NoSuchColumnException(CatalogUtil.buildFQName(qualifier, "*"));
}
- Schema schema = relationOp.getTableSchema();
+ Schema schema = relationOp.getLogicalSchema();
Column[] resolvedColumns = new Column[schema.size()];
return schema.getColumns().toArray(resolvedColumns);
} else { // if a column reference is not qualified
@@ -123,7 +123,7 @@ public class LogicalPlanPreprocessor extends BaseAlgebraVisitor<LogicalPlanner.P
while (iterator.hasNext()) {
relationOp = iterator.next();
- schema = relationOp.getTableSchema();
+ schema = relationOp.getLogicalSchema();
resolvedColumns.addAll(schema.getColumns());
}
http://git-wip-us.apache.org/repos/asf/tajo/blob/32be38d4/tajo-plan/src/main/java/org/apache/tajo/plan/LogicalPlanner.java
----------------------------------------------------------------------
diff --git a/tajo-plan/src/main/java/org/apache/tajo/plan/LogicalPlanner.java b/tajo-plan/src/main/java/org/apache/tajo/plan/LogicalPlanner.java
index 1a426e0..eebee6f 100644
--- a/tajo-plan/src/main/java/org/apache/tajo/plan/LogicalPlanner.java
+++ b/tajo-plan/src/main/java/org/apache/tajo/plan/LogicalPlanner.java
@@ -144,6 +144,7 @@ public class LogicalPlanner extends BaseAlgebraVisitor<LogicalPlanner.PlanContex
// Add Root Node
LogicalRootNode root = plan.createNode(LogicalRootNode.class);
+
root.setInSchema(topMostNode.getOutSchema());
root.setChild(topMostNode);
root.setOutSchema(topMostNode.getOutSchema());
@@ -257,9 +258,9 @@ public class LogicalPlanner extends BaseAlgebraVisitor<LogicalPlanner.PlanContex
// Set ProjectionNode
projectionNode = context.queryBlock.getNodeFromExpr(projection);
- projectionNode.setInSchema(child.getOutSchema());
- projectionNode.setTargets(targets);
+ projectionNode.init(projection.isDistinct(), targets);
projectionNode.setChild(child);
+ projectionNode.setInSchema(child.getOutSchema());
if (projection.isDistinct() && block.hasNode(NodeType.GROUP_BY)) {
throw new VerifyException("Cannot support grouping and distinct at the same time yet");
@@ -521,7 +522,7 @@ public class LogicalPlanner extends BaseAlgebraVisitor<LogicalPlanner.PlanContex
} else if (projectable instanceof RelationNode) {
RelationNode relationNode = (RelationNode) projectable;
- verifyIfTargetsCanBeEvaluated(relationNode.getTableSchema(), (Projectable) relationNode);
+ verifyIfTargetsCanBeEvaluated(relationNode.getLogicalSchema(), (Projectable) relationNode);
} else {
verifyIfTargetsCanBeEvaluated(projectable.getInSchema(), projectable);
@@ -1300,7 +1301,7 @@ public class LogicalPlanner extends BaseAlgebraVisitor<LogicalPlanner.PlanContex
private static LinkedHashSet<Target> createFieldTargetsFromRelation(QueryBlock block, RelationNode relationNode,
Set<String> newlyEvaluatedRefNames) {
LinkedHashSet<Target> targets = Sets.newLinkedHashSet();
- for (Column column : relationNode.getTableSchema().getColumns()) {
+ for (Column column : relationNode.getLogicalSchema().getColumns()) {
String aliasName = block.namedExprsMgr.checkAndGetIfAliasedColumn(column.getQualifiedName());
if (aliasName != null) {
targets.add(new Target(new FieldEval(column), aliasName));
@@ -1569,7 +1570,7 @@ public class LogicalPlanner extends BaseAlgebraVisitor<LogicalPlanner.PlanContex
}
if (child instanceof Projectable) {
- Projectable projectionNode = (Projectable) insertNode.getChild();
+ Projectable projectionNode = (Projectable)insertNode.getChild();
// Modifying projected columns by adding NULL constants
// It is because that table appender does not support target columns to be written.
@@ -2017,7 +2018,7 @@ public class LogicalPlanner extends BaseAlgebraVisitor<LogicalPlanner.PlanContex
return false;
}
- if (columnRefs.size() > 0 && !node.getTableSchema().containsAll(columnRefs)) {
+ if (columnRefs.size() > 0 && !node.getLogicalSchema().containsAll(columnRefs)) {
return false;
}
http://git-wip-us.apache.org/repos/asf/tajo/blob/32be38d4/tajo-plan/src/main/java/org/apache/tajo/plan/Target.java
----------------------------------------------------------------------
diff --git a/tajo-plan/src/main/java/org/apache/tajo/plan/Target.java b/tajo-plan/src/main/java/org/apache/tajo/plan/Target.java
index f49a93d..a5c39b8 100644
--- a/tajo-plan/src/main/java/org/apache/tajo/plan/Target.java
+++ b/tajo-plan/src/main/java/org/apache/tajo/plan/Target.java
@@ -20,17 +20,20 @@ package org.apache.tajo.plan;
import com.google.gson.annotations.Expose;
import org.apache.tajo.catalog.Column;
+import org.apache.tajo.common.ProtoObject;
import org.apache.tajo.common.TajoDataTypes.DataType;
import org.apache.tajo.json.GsonObject;
import org.apache.tajo.plan.expr.EvalNode;
import org.apache.tajo.plan.expr.FieldEval;
+import org.apache.tajo.plan.serder.LogicalNodeSerializer;
import org.apache.tajo.plan.serder.PlanGsonHelper;
+import org.apache.tajo.plan.serder.PlanProto;
import org.apache.tajo.util.TUtil;
/**
* A Target contains how to evaluate an expression and its alias name.
*/
-public class Target implements Cloneable, GsonObject {
+public class Target implements Cloneable, GsonObject, ProtoObject<PlanProto.Target> {
@Expose private EvalNode expr;
@Expose private Column column;
@Expose private String alias = null;
@@ -46,8 +49,7 @@ public class Target implements Cloneable, GsonObject {
String normalized = alias;
// If an expr is a column reference and its alias is equivalent to column name, ignore a given alias.
- if (eval instanceof FieldEval
- && eval.getName().equals(normalized)) {
+ if (eval instanceof FieldEval && eval.getName().equals(normalized)) {
column = ((FieldEval) eval).getColumnRef();
} else {
column = new Column(normalized, eval.getValueType());
@@ -127,4 +129,9 @@ public class Target implements Cloneable, GsonObject {
public String toJson() {
return PlanGsonHelper.toJson(this, Target.class);
}
+
+ @Override
+ public PlanProto.Target getProto() {
+ return LogicalNodeSerializer.convertTarget(this);
+ }
}
http://git-wip-us.apache.org/repos/asf/tajo/blob/32be38d4/tajo-plan/src/main/java/org/apache/tajo/plan/expr/AggregationFunctionCallEval.java
----------------------------------------------------------------------
diff --git a/tajo-plan/src/main/java/org/apache/tajo/plan/expr/AggregationFunctionCallEval.java b/tajo-plan/src/main/java/org/apache/tajo/plan/expr/AggregationFunctionCallEval.java
index 542eae8..ca8c110 100644
--- a/tajo-plan/src/main/java/org/apache/tajo/plan/expr/AggregationFunctionCallEval.java
+++ b/tajo-plan/src/main/java/org/apache/tajo/plan/expr/AggregationFunctionCallEval.java
@@ -27,13 +27,14 @@ import org.apache.tajo.plan.function.AggFunction;
import org.apache.tajo.plan.function.FunctionContext;
import org.apache.tajo.storage.Tuple;
import org.apache.tajo.storage.VTuple;
+import org.apache.tajo.util.TUtil;
public class AggregationFunctionCallEval extends FunctionEval implements Cloneable {
- @Expose protected AggFunction instance;
@Expose boolean intermediatePhase = false;
@Expose boolean finalPhase = true;
@Expose String alias;
+ protected AggFunction instance;
private Tuple params;
protected AggregationFunctionCallEval(EvalType type, FunctionDesc desc, AggFunction instance, EvalNode[] givenArgs) {
@@ -91,6 +92,10 @@ public class AggregationFunctionCallEval extends FunctionEval implements Cloneab
}
}
+ public boolean hasAlias() {
+ return this.alias != null;
+ }
+
public void setAlias(String alias) { this.alias = alias; }
public String getAlias() { return this.alias; }
@@ -106,6 +111,22 @@ public class AggregationFunctionCallEval extends FunctionEval implements Cloneab
return clone;
}
+ public boolean isIntermediatePhase() {
+ return intermediatePhase;
+ }
+
+ public void setIntermediatePhase(boolean flag) {
+ this.intermediatePhase = flag;
+ }
+
+ public void setFinalPhase(boolean flag) {
+ this.finalPhase = flag;
+ }
+
+ public boolean isFinalPhase() {
+ return finalPhase;
+ }
+
public void setFirstPhase() {
this.finalPhase = false;
this.intermediatePhase = false;
@@ -120,4 +141,19 @@ public class AggregationFunctionCallEval extends FunctionEval implements Cloneab
this.finalPhase = false;
this.intermediatePhase = true;
}
+
+ public boolean equals(Object obj) {
+ if (obj instanceof AggregationFunctionCallEval) {
+ AggregationFunctionCallEval other = (AggregationFunctionCallEval) obj;
+
+ boolean eq = super.equals(other);
+ eq &= instance.equals(other.instance);
+ eq &= intermediatePhase == other.intermediatePhase;
+ eq &= finalPhase == other.finalPhase;
+ eq &= TUtil.checkEquals(alias, other.alias);
+ return eq;
+ }
+
+ return false;
+ }
}
http://git-wip-us.apache.org/repos/asf/tajo/blob/32be38d4/tajo-plan/src/main/java/org/apache/tajo/plan/expr/EvalNode.java
----------------------------------------------------------------------
diff --git a/tajo-plan/src/main/java/org/apache/tajo/plan/expr/EvalNode.java b/tajo-plan/src/main/java/org/apache/tajo/plan/expr/EvalNode.java
index 638383a..dcb7285 100644
--- a/tajo-plan/src/main/java/org/apache/tajo/plan/expr/EvalNode.java
+++ b/tajo-plan/src/main/java/org/apache/tajo/plan/expr/EvalNode.java
@@ -20,17 +20,20 @@ package org.apache.tajo.plan.expr;
import com.google.gson.annotations.Expose;
import org.apache.tajo.catalog.Schema;
+import org.apache.tajo.common.ProtoObject;
import org.apache.tajo.common.TajoDataTypes.DataType;
import org.apache.tajo.datum.Datum;
import org.apache.tajo.json.GsonObject;
+import org.apache.tajo.plan.serder.EvalNodeSerializer;
import org.apache.tajo.plan.serder.PlanGsonHelper;
+import org.apache.tajo.plan.serder.PlanProto;
import org.apache.tajo.storage.Tuple;
/**
* An annotated expression which includes actual data domains.
* It is also used for evaluation.
*/
-public abstract class EvalNode implements Cloneable, GsonObject {
+public abstract class EvalNode implements Cloneable, GsonObject, ProtoObject<PlanProto.EvalNodeTree> {
@Expose protected EvalType type;
public EvalNode() {
@@ -71,4 +74,9 @@ public abstract class EvalNode implements Cloneable, GsonObject {
evalNode.type = type;
return evalNode;
}
+
+ @Override
+ public PlanProto.EvalNodeTree getProto() {
+ return EvalNodeSerializer.serialize(this);
+ }
}
http://git-wip-us.apache.org/repos/asf/tajo/blob/32be38d4/tajo-plan/src/main/java/org/apache/tajo/plan/expr/WindowFunctionEval.java
----------------------------------------------------------------------
diff --git a/tajo-plan/src/main/java/org/apache/tajo/plan/expr/WindowFunctionEval.java b/tajo-plan/src/main/java/org/apache/tajo/plan/expr/WindowFunctionEval.java
index 84b4a45..0ff5927 100644
--- a/tajo-plan/src/main/java/org/apache/tajo/plan/expr/WindowFunctionEval.java
+++ b/tajo-plan/src/main/java/org/apache/tajo/plan/expr/WindowFunctionEval.java
@@ -86,6 +86,17 @@ public class WindowFunctionEval extends AggregationFunctionCallEval implements C
return funcDesc.getReturnType();
}
+ public boolean equals(Object obj) {
+ if (obj instanceof WindowFunctionEval) {
+ WindowFunctionEval other = (WindowFunctionEval) obj;
+ boolean eq = TUtil.checkEquals(sortSpecs, other.sortSpecs);
+ eq &= TUtil.checkEquals(windowFrame, other.windowFrame);
+ return eq;
+ } else {
+ return false;
+ }
+ }
+
@Override
public Object clone() throws CloneNotSupportedException {
WindowFunctionEval windowFunctionEval = (WindowFunctionEval) super.clone();
@@ -95,6 +106,7 @@ public class WindowFunctionEval extends AggregationFunctionCallEval implements C
windowFunctionEval.sortSpecs[i] = (SortSpec) sortSpecs[i].clone();
}
}
+ windowFunctionEval.windowFrame = windowFrame.clone();
return windowFunctionEval;
}
http://git-wip-us.apache.org/repos/asf/tajo/blob/32be38d4/tajo-plan/src/main/java/org/apache/tajo/plan/logical/AlterTableNode.java
----------------------------------------------------------------------
diff --git a/tajo-plan/src/main/java/org/apache/tajo/plan/logical/AlterTableNode.java b/tajo-plan/src/main/java/org/apache/tajo/plan/logical/AlterTableNode.java
index e9e2467..e926dce 100644
--- a/tajo-plan/src/main/java/org/apache/tajo/plan/logical/AlterTableNode.java
+++ b/tajo-plan/src/main/java/org/apache/tajo/plan/logical/AlterTableNode.java
@@ -43,6 +43,16 @@ public class AlterTableNode extends LogicalNode {
super(pid, NodeType.ALTER_TABLE);
}
+ @Override
+ public int childNum() {
+ return 0;
+ }
+
+ @Override
+ public LogicalNode getChild(int idx) {
+ return null;
+ }
+
public String getTableName() {
return tableName;
}
http://git-wip-us.apache.org/repos/asf/tajo/blob/32be38d4/tajo-plan/src/main/java/org/apache/tajo/plan/logical/AlterTablespaceNode.java
----------------------------------------------------------------------
diff --git a/tajo-plan/src/main/java/org/apache/tajo/plan/logical/AlterTablespaceNode.java b/tajo-plan/src/main/java/org/apache/tajo/plan/logical/AlterTablespaceNode.java
index 7b79cc1..8b68dbe 100644
--- a/tajo-plan/src/main/java/org/apache/tajo/plan/logical/AlterTablespaceNode.java
+++ b/tajo-plan/src/main/java/org/apache/tajo/plan/logical/AlterTablespaceNode.java
@@ -23,9 +23,6 @@ import com.google.common.base.Objects;
import com.google.gson.annotations.Expose;
import org.apache.tajo.algebra.AlterTablespaceSetType;
import org.apache.tajo.plan.PlanString;
-import org.apache.tajo.plan.logical.LogicalNode;
-import org.apache.tajo.plan.logical.LogicalNodeVisitor;
-import org.apache.tajo.plan.logical.NodeType;
public class AlterTablespaceNode extends LogicalNode implements Cloneable {
@@ -38,6 +35,16 @@ public class AlterTablespaceNode extends LogicalNode implements Cloneable {
super(pid, NodeType.ALTER_TABLESPACE);
}
+ @Override
+ public int childNum() {
+ return 0;
+ }
+
+ @Override
+ public LogicalNode getChild(int idx) {
+ return null;
+ }
+
public String getTablespaceName() {
return tablespaceName;
}
http://git-wip-us.apache.org/repos/asf/tajo/blob/32be38d4/tajo-plan/src/main/java/org/apache/tajo/plan/logical/BinaryNode.java
----------------------------------------------------------------------
diff --git a/tajo-plan/src/main/java/org/apache/tajo/plan/logical/BinaryNode.java b/tajo-plan/src/main/java/org/apache/tajo/plan/logical/BinaryNode.java
index 709ef34..70b1bc4 100644
--- a/tajo-plan/src/main/java/org/apache/tajo/plan/logical/BinaryNode.java
+++ b/tajo-plan/src/main/java/org/apache/tajo/plan/logical/BinaryNode.java
@@ -28,6 +28,22 @@ public abstract class BinaryNode extends LogicalNode implements Cloneable, GsonO
public BinaryNode(int pid, NodeType nodeType) {
super(pid, nodeType);
}
+
+ @Override
+ public int childNum() {
+ return 2;
+ }
+
+ @Override
+ public LogicalNode getChild(int idx) {
+ if (idx == 0) {
+ return leftChild;
+ } else if (idx == 1) {
+ return rightChild;
+ } else {
+ throw new ArrayIndexOutOfBoundsException(idx);
+ }
+ }
public <T extends LogicalNode> T getLeftChild() {
return (T) this.leftChild;
http://git-wip-us.apache.org/repos/asf/tajo/blob/32be38d4/tajo-plan/src/main/java/org/apache/tajo/plan/logical/CreateDatabaseNode.java
----------------------------------------------------------------------
diff --git a/tajo-plan/src/main/java/org/apache/tajo/plan/logical/CreateDatabaseNode.java b/tajo-plan/src/main/java/org/apache/tajo/plan/logical/CreateDatabaseNode.java
index e3f73fe..28bd4cd 100644
--- a/tajo-plan/src/main/java/org/apache/tajo/plan/logical/CreateDatabaseNode.java
+++ b/tajo-plan/src/main/java/org/apache/tajo/plan/logical/CreateDatabaseNode.java
@@ -30,6 +30,16 @@ public class CreateDatabaseNode extends LogicalNode implements Cloneable {
super(pid, NodeType.CREATE_DATABASE);
}
+ @Override
+ public int childNum() {
+ return 0;
+ }
+
+ @Override
+ public LogicalNode getChild(int idx) {
+ return null;
+ }
+
public void init(String databaseName, boolean ifNotExists) {
this.databaseName = databaseName;
this.ifNotExists = ifNotExists;
http://git-wip-us.apache.org/repos/asf/tajo/blob/32be38d4/tajo-plan/src/main/java/org/apache/tajo/plan/logical/CreateTableNode.java
----------------------------------------------------------------------
diff --git a/tajo-plan/src/main/java/org/apache/tajo/plan/logical/CreateTableNode.java b/tajo-plan/src/main/java/org/apache/tajo/plan/logical/CreateTableNode.java
index d03da6a..0976ab5 100644
--- a/tajo-plan/src/main/java/org/apache/tajo/plan/logical/CreateTableNode.java
+++ b/tajo-plan/src/main/java/org/apache/tajo/plan/logical/CreateTableNode.java
@@ -99,11 +99,12 @@ public class CreateTableNode extends StoreTableNode implements Cloneable {
public boolean equals(Object obj) {
if (obj instanceof CreateTableNode) {
CreateTableNode other = (CreateTableNode) obj;
- return super.equals(other)
- && this.schema.equals(other.schema)
- && this.external == other.external
- && TUtil.checkEquals(path, other.path)
- && ifNotExists == other.ifNotExists;
+ boolean eq = super.equals(other);
+ eq &= this.schema.equals(other.schema);
+ eq &= this.external == other.external;
+ eq &= TUtil.checkEquals(path, other.path);
+ eq &= ifNotExists == other.ifNotExists;
+ return eq;
} else {
return false;
}
http://git-wip-us.apache.org/repos/asf/tajo/blob/32be38d4/tajo-plan/src/main/java/org/apache/tajo/plan/logical/DistinctGroupbyNode.java
----------------------------------------------------------------------
diff --git a/tajo-plan/src/main/java/org/apache/tajo/plan/logical/DistinctGroupbyNode.java b/tajo-plan/src/main/java/org/apache/tajo/plan/logical/DistinctGroupbyNode.java
index e31e488..a40ad59 100644
--- a/tajo-plan/src/main/java/org/apache/tajo/plan/logical/DistinctGroupbyNode.java
+++ b/tajo-plan/src/main/java/org/apache/tajo/plan/logical/DistinctGroupbyNode.java
@@ -34,19 +34,19 @@ public class DistinctGroupbyNode extends UnaryNode implements Projectable, Clone
private GroupbyNode groupbyPlan;
@Expose
- private List<GroupbyNode> groupByNodes;
+ private List<GroupbyNode> subGroupbyPlan;
@Expose
private Target[] targets;
@Expose
- private Column[] groupingColumns;
+ private Column[] groupingColumns = PlannerUtil.EMPTY_COLUMNS;
@Expose
- private int[] resultColumnIds;
+ private int[] resultColumnIds = new int[]{};
/** Aggregation Functions */
- @Expose private AggregationFunctionCallEval[] aggrFunctions;
+ @Expose private AggregationFunctionCallEval[] aggrFunctions = PlannerUtil.EMPTY_AGG_FUNCS;
public DistinctGroupbyNode(int pid) {
super(pid, NodeType.DISTINCT_GROUP_BY);
@@ -54,7 +54,7 @@ public class DistinctGroupbyNode extends UnaryNode implements Projectable, Clone
@Override
public boolean hasTargets() {
- return targets != null && targets.length > 0;
+ return targets.length > 0;
}
@Override
@@ -72,19 +72,19 @@ public class DistinctGroupbyNode extends UnaryNode implements Projectable, Clone
}
}
- public void setGroupbyNodes(List<GroupbyNode> groupByNodes) {
- this.groupByNodes = groupByNodes;
+ public void setSubPlans(List<GroupbyNode> groupByNodes) {
+ this.subGroupbyPlan = groupByNodes;
}
- public List<GroupbyNode> getGroupByNodes() {
- return groupByNodes;
+ public List<GroupbyNode> getSubPlans() {
+ return subGroupbyPlan;
}
public final Column[] getGroupingColumns() {
return groupingColumns;
}
- public final void setGroupColumns(Column[] groupingColumns) {
+ public final void setGroupingColumns(Column[] groupingColumns) {
this.groupingColumns = groupingColumns;
}
@@ -119,12 +119,12 @@ public class DistinctGroupbyNode extends UnaryNode implements Projectable, Clone
}
}
- if (groupByNodes != null) {
- cloneNode.groupByNodes = new ArrayList<GroupbyNode>();
- for (GroupbyNode eachNode: groupByNodes) {
+ if (subGroupbyPlan != null) {
+ cloneNode.subGroupbyPlan = new ArrayList<GroupbyNode>();
+ for (GroupbyNode eachNode: subGroupbyPlan) {
GroupbyNode groupbyNode = (GroupbyNode)eachNode.clone();
groupbyNode.setPID(-1);
- cloneNode.groupByNodes.add(groupbyNode);
+ cloneNode.subGroupbyPlan.add(groupbyNode);
}
}
@@ -151,7 +151,7 @@ public class DistinctGroupbyNode extends UnaryNode implements Projectable, Clone
sb.append("grouping set=").append(TUtil.arrayToString(groupingColumns));
sb.append(", ");
}
- for (GroupbyNode eachNode: groupByNodes) {
+ for (GroupbyNode eachNode: subGroupbyPlan) {
sb.append(", groupbyNode=").append(eachNode.toString());
}
sb.append(")");
@@ -164,8 +164,9 @@ public class DistinctGroupbyNode extends UnaryNode implements Projectable, Clone
DistinctGroupbyNode other = (DistinctGroupbyNode) obj;
boolean eq = super.equals(other);
eq = eq && TUtil.checkEquals(groupingColumns, other.groupingColumns);
- eq = eq && TUtil.checkEquals(groupByNodes, other.groupByNodes);
+ eq = eq && TUtil.checkEquals(subGroupbyPlan, other.subGroupbyPlan);
eq = eq && TUtil.checkEquals(targets, other.targets);
+ eq = eq && TUtil.checkEquals(resultColumnIds, other.resultColumnIds);
return eq;
} else {
return false;
@@ -194,7 +195,7 @@ public class DistinctGroupbyNode extends UnaryNode implements Projectable, Clone
sb.append("(");
String prefix = "";
- for (GroupbyNode eachNode: groupByNodes) {
+ for (GroupbyNode eachNode: subGroupbyPlan) {
if (eachNode.hasAggFunctions()) {
AggregationFunctionCallEval[] aggrFunctions = eachNode.getAggFunctions();
for (int j = 0; j < aggrFunctions.length; j++) {
@@ -218,7 +219,7 @@ public class DistinctGroupbyNode extends UnaryNode implements Projectable, Clone
planStr.addDetail("out schema:").appendDetail(getOutSchema().toString());
planStr.addDetail("in schema:").appendDetail(getInSchema().toString());
- for (GroupbyNode eachNode: groupByNodes) {
+ for (GroupbyNode eachNode: subGroupbyPlan) {
planStr.addDetail("\t").appendDetail("distinct: " + eachNode.isDistinct())
.appendDetail(", " + eachNode.getShortPlanString());
}
@@ -236,7 +237,7 @@ public class DistinctGroupbyNode extends UnaryNode implements Projectable, Clone
}
}
}
- for (GroupbyNode eachGroupbyNode: groupByNodes) {
+ for (GroupbyNode eachGroupbyNode: subGroupbyPlan) {
if (eachGroupbyNode.getGroupingColumns() != null && eachGroupbyNode.getGroupingColumns().length > 0) {
for (Column eachColumn: eachGroupbyNode.getGroupingColumns()) {
if (!shuffleKeyColumns.contains(eachColumn)) {
http://git-wip-us.apache.org/repos/asf/tajo/blob/32be38d4/tajo-plan/src/main/java/org/apache/tajo/plan/logical/DropDatabaseNode.java
----------------------------------------------------------------------
diff --git a/tajo-plan/src/main/java/org/apache/tajo/plan/logical/DropDatabaseNode.java b/tajo-plan/src/main/java/org/apache/tajo/plan/logical/DropDatabaseNode.java
index b88c384..c566bf5 100644
--- a/tajo-plan/src/main/java/org/apache/tajo/plan/logical/DropDatabaseNode.java
+++ b/tajo-plan/src/main/java/org/apache/tajo/plan/logical/DropDatabaseNode.java
@@ -24,12 +24,22 @@ import org.apache.tajo.plan.PlanString;
public class DropDatabaseNode extends LogicalNode implements Cloneable {
@Expose private String databaseName;
- @Expose private boolean ifExists;
+ @Expose private boolean ifExists = false;
public DropDatabaseNode(int pid) {
super(pid, NodeType.DROP_DATABASE);
}
+ @Override
+ public int childNum() {
+ return 0;
+ }
+
+ @Override
+ public LogicalNode getChild(int idx) {
+ return null;
+ }
+
public void init(String databaseName, boolean ifExists) {
this.databaseName = databaseName;
this.ifExists = ifExists;
@@ -55,7 +65,10 @@ public class DropDatabaseNode extends LogicalNode implements Cloneable {
public boolean equals(Object obj) {
if (obj instanceof DropDatabaseNode) {
DropDatabaseNode other = (DropDatabaseNode) obj;
- return super.equals(other) && this.databaseName.equals(other.databaseName) && ifExists == other.ifExists;
+ boolean eq = super.equals(other);
+ eq &= this.databaseName.equals(other.databaseName);
+ eq &= ifExists == other.ifExists;
+ return eq;
} else {
return false;
}
http://git-wip-us.apache.org/repos/asf/tajo/blob/32be38d4/tajo-plan/src/main/java/org/apache/tajo/plan/logical/DropTableNode.java
----------------------------------------------------------------------
diff --git a/tajo-plan/src/main/java/org/apache/tajo/plan/logical/DropTableNode.java b/tajo-plan/src/main/java/org/apache/tajo/plan/logical/DropTableNode.java
index 1a61852..5bde21b 100644
--- a/tajo-plan/src/main/java/org/apache/tajo/plan/logical/DropTableNode.java
+++ b/tajo-plan/src/main/java/org/apache/tajo/plan/logical/DropTableNode.java
@@ -30,6 +30,16 @@ public class DropTableNode extends LogicalNode implements Cloneable {
super(pid, NodeType.DROP_TABLE);
}
+ @Override
+ public int childNum() {
+ return 0;
+ }
+
+ @Override
+ public LogicalNode getChild(int idx) {
+ return null;
+ }
+
public void init(String tableName, boolean ifExists, boolean purge) {
this.tableName = tableName;
this.ifExists = ifExists;
http://git-wip-us.apache.org/repos/asf/tajo/blob/32be38d4/tajo-plan/src/main/java/org/apache/tajo/plan/logical/EvalExprNode.java
----------------------------------------------------------------------
diff --git a/tajo-plan/src/main/java/org/apache/tajo/plan/logical/EvalExprNode.java b/tajo-plan/src/main/java/org/apache/tajo/plan/logical/EvalExprNode.java
index 2519165..0f96575 100644
--- a/tajo-plan/src/main/java/org/apache/tajo/plan/logical/EvalExprNode.java
+++ b/tajo-plan/src/main/java/org/apache/tajo/plan/logical/EvalExprNode.java
@@ -35,6 +35,16 @@ public class EvalExprNode extends LogicalNode implements Projectable {
}
@Override
+ public int childNum() {
+ return 0;
+ }
+
+ @Override
+ public LogicalNode getChild(int idx) {
+ return null;
+ }
+
+ @Override
public boolean hasTargets() {
return true;
}
@@ -42,7 +52,7 @@ public class EvalExprNode extends LogicalNode implements Projectable {
@Override
public void setTargets(Target[] targets) {
this.exprs = targets;
- setOutSchema(PlannerUtil.targetToSchema(targets));
+ this.setOutSchema(PlannerUtil.targetToSchema(targets));
}
@Override
http://git-wip-us.apache.org/repos/asf/tajo/blob/32be38d4/tajo-plan/src/main/java/org/apache/tajo/plan/logical/GroupbyNode.java
----------------------------------------------------------------------
diff --git a/tajo-plan/src/main/java/org/apache/tajo/plan/logical/GroupbyNode.java b/tajo-plan/src/main/java/org/apache/tajo/plan/logical/GroupbyNode.java
index 2c74ce3..4a18cb4 100644
--- a/tajo-plan/src/main/java/org/apache/tajo/plan/logical/GroupbyNode.java
+++ b/tajo-plan/src/main/java/org/apache/tajo/plan/logical/GroupbyNode.java
@@ -18,6 +18,7 @@
package org.apache.tajo.plan.logical;
+import com.google.common.base.Preconditions;
import com.google.gson.annotations.Expose;
import org.apache.tajo.catalog.Column;
import org.apache.tajo.plan.PlanString;
@@ -27,10 +28,10 @@ import org.apache.tajo.plan.expr.AggregationFunctionCallEval;
import org.apache.tajo.util.TUtil;
public class GroupbyNode extends UnaryNode implements Projectable, Cloneable {
- /** Grouping key sets */
- @Expose private Column [] groupingColumns;
+ /** Grouping key sets */
+ @Expose private Column [] groupingKeys = PlannerUtil.EMPTY_COLUMNS;
/** Aggregation Functions */
- @Expose private AggregationFunctionCallEval [] aggrFunctions;
+ @Expose private AggregationFunctionCallEval [] aggrFunctions = PlannerUtil.EMPTY_AGG_FUNCS;
/**
* It's a list of targets. The grouping columns should be followed by aggregation functions.
* aggrFunctions keep actual aggregation functions, but it only contains field references.
@@ -42,16 +43,20 @@ public class GroupbyNode extends UnaryNode implements Projectable, Cloneable {
super(pid, NodeType.GROUP_BY);
}
+ public int groupingKeyNum() {
+ return groupingKeys.length;
+ }
+
public final boolean isEmptyGrouping() {
- return groupingColumns == null || groupingColumns.length == 0;
+ return groupingKeys.length == 0;
}
- public void setGroupingColumns(Column [] groupingColumns) {
- this.groupingColumns = groupingColumns;
+ public void setGroupingColumns(Column [] groupingKeys) {
+ this.groupingKeys = groupingKeys;
}
public final Column [] getGroupingColumns() {
- return this.groupingColumns;
+ return this.groupingKeys;
}
public final boolean isDistinct() {
@@ -63,7 +68,11 @@ public class GroupbyNode extends UnaryNode implements Projectable, Cloneable {
}
public boolean hasAggFunctions() {
- return this.aggrFunctions != null;
+ return aggrFunctions.length > 0;
+ }
+
+ public int aggregationFunctionNum() {
+ return this.aggrFunctions.length;
}
public AggregationFunctionCallEval[] getAggFunctions() {
@@ -71,6 +80,7 @@ public class GroupbyNode extends UnaryNode implements Projectable, Cloneable {
}
public void setAggFunctions(AggregationFunctionCallEval[] evals) {
+ Preconditions.checkNotNull(evals);
this.aggrFunctions = evals;
}
@@ -96,8 +106,8 @@ public class GroupbyNode extends UnaryNode implements Projectable, Cloneable {
public String toString() {
StringBuilder sb = new StringBuilder("GroupBy (");
- if (groupingColumns != null || groupingColumns.length > 0) {
- sb.append("grouping set=").append(TUtil.arrayToString(groupingColumns));
+ if (groupingKeys != null && groupingKeys.length > 0) {
+ sb.append("grouping set=").append(TUtil.arrayToString(groupingKeys));
sb.append(", ");
}
if (hasAggFunctions()) {
@@ -112,7 +122,8 @@ public class GroupbyNode extends UnaryNode implements Projectable, Cloneable {
if (obj instanceof GroupbyNode) {
GroupbyNode other = (GroupbyNode) obj;
boolean eq = super.equals(other);
- eq = eq && TUtil.checkEquals(groupingColumns, other.groupingColumns);
+ eq = eq && isDistinct() == other.isDistinct();
+ eq = eq && TUtil.checkEquals(groupingKeys, other.groupingKeys);
eq = eq && TUtil.checkEquals(aggrFunctions, other.aggrFunctions);
eq = eq && TUtil.checkEquals(targets, other.targets);
return eq;
@@ -124,10 +135,10 @@ public class GroupbyNode extends UnaryNode implements Projectable, Cloneable {
@Override
public Object clone() throws CloneNotSupportedException {
GroupbyNode grp = (GroupbyNode) super.clone();
- if (groupingColumns != null) {
- grp.groupingColumns = new Column[groupingColumns.length];
- for (int i = 0; i < groupingColumns.length; i++) {
- grp.groupingColumns[i] = groupingColumns[i];
+ if (groupingKeys != null) {
+ grp.groupingKeys = new Column[groupingKeys.length];
+ for (int i = 0; i < groupingKeys.length; i++) {
+ grp.groupingKeys[i] = groupingKeys[i];
}
}
@@ -151,7 +162,7 @@ public class GroupbyNode extends UnaryNode implements Projectable, Cloneable {
public String getShortPlanString() {
StringBuilder sb = new StringBuilder();
sb.append(getType().name() + "(" + getPID() + ")").append("(");
- Column [] groupingColumns = this.groupingColumns;
+ Column [] groupingColumns = this.groupingKeys;
for (int j = 0; j < groupingColumns.length; j++) {
sb.append(groupingColumns[j].getSimpleName());
if(j < groupingColumns.length - 1) {
@@ -196,7 +207,7 @@ public class GroupbyNode extends UnaryNode implements Projectable, Cloneable {
StringBuilder sb = new StringBuilder();
sb.append("(");
- Column [] groupingColumns = this.groupingColumns;
+ Column [] groupingColumns = this.groupingKeys;
for (int j = 0; j < groupingColumns.length; j++) {
sb.append(groupingColumns[j].getSimpleName());
if(j < groupingColumns.length - 1) {
@@ -243,7 +254,7 @@ public class GroupbyNode extends UnaryNode implements Projectable, Cloneable {
* If so, it returns TRUE. Otherwise, it returns FALSE.
*/
public boolean isAggregationColumn(String simpleName) {
- for (int i = groupingColumns.length; i < targets.length; i++) {
+ for (int i = groupingKeys.length; i < targets.length; i++) {
if (simpleName.equals(targets[i].getNamedColumn().getSimpleName()) ||
simpleName.equals(targets[i].getAlias())) {
return true;
http://git-wip-us.apache.org/repos/asf/tajo/blob/32be38d4/tajo-plan/src/main/java/org/apache/tajo/plan/logical/InsertNode.java
----------------------------------------------------------------------
diff --git a/tajo-plan/src/main/java/org/apache/tajo/plan/logical/InsertNode.java b/tajo-plan/src/main/java/org/apache/tajo/plan/logical/InsertNode.java
index d1d8582..769cb59 100644
--- a/tajo-plan/src/main/java/org/apache/tajo/plan/logical/InsertNode.java
+++ b/tajo-plan/src/main/java/org/apache/tajo/plan/logical/InsertNode.java
@@ -95,6 +95,10 @@ public class InsertNode extends StoreTableNode implements Cloneable {
this.targetSchema = schema;
}
+ public boolean hasProjectedSchema() {
+ return this.projectedSchema != null;
+ }
+
public Schema getProjectedSchema() {
return this.projectedSchema;
}
@@ -123,11 +127,12 @@ public class InsertNode extends StoreTableNode implements Cloneable {
public boolean equals(Object obj) {
if (obj instanceof InsertNode) {
InsertNode other = (InsertNode) obj;
- return super.equals(other)
- && this.overwrite == other.overwrite
- && TUtil.checkEquals(this.tableSchema, other.tableSchema)
- && TUtil.checkEquals(this.targetSchema, other.targetSchema)
- && TUtil.checkEquals(path, other.path);
+ boolean eq = super.equals(other);
+ eq &= this.overwrite == other.overwrite;
+ eq &= TUtil.checkEquals(this.tableSchema, other.tableSchema);
+ eq &= TUtil.checkEquals(this.targetSchema, other.targetSchema);
+ eq &= TUtil.checkEquals(path, other.path);
+ return eq;
} else {
return false;
}
http://git-wip-us.apache.org/repos/asf/tajo/blob/32be38d4/tajo-plan/src/main/java/org/apache/tajo/plan/logical/LogicalNode.java
----------------------------------------------------------------------
diff --git a/tajo-plan/src/main/java/org/apache/tajo/plan/logical/LogicalNode.java b/tajo-plan/src/main/java/org/apache/tajo/plan/logical/LogicalNode.java
index c42a05e..200977b 100644
--- a/tajo-plan/src/main/java/org/apache/tajo/plan/logical/LogicalNode.java
+++ b/tajo-plan/src/main/java/org/apache/tajo/plan/logical/LogicalNode.java
@@ -30,24 +30,24 @@ import org.apache.tajo.plan.serder.PlanGsonHelper;
import org.apache.tajo.util.TUtil;
public abstract class LogicalNode implements Cloneable, GsonObject {
- @Expose private int pid;
+ @Expose private int nodeId;
@Expose private NodeType type;
@Expose private Schema inputSchema;
@Expose private Schema outputSchema;
@Expose private double cost = 0;
- protected LogicalNode(int pid, NodeType type) {
- this.pid = pid;
+ protected LogicalNode(int nodeId, NodeType type) {
+ this.nodeId = nodeId;
this.type = type;
}
public int getPID() {
- return pid;
+ return nodeId;
}
public void setPID(int pid) {
- this.pid = pid;
+ this.nodeId = pid;
}
public NodeType getType() {
@@ -58,6 +58,10 @@ public abstract class LogicalNode implements Cloneable, GsonObject {
this.type = type;
}
+ public abstract int childNum();
+
+ public abstract LogicalNode getChild(int idx);
+
public double getCost() {
return this.cost;
}
@@ -105,7 +109,7 @@ public abstract class LogicalNode implements Cloneable, GsonObject {
@Override
public Object clone() throws CloneNotSupportedException {
LogicalNode node = (LogicalNode)super.clone();
- node.pid = pid;
+ node.nodeId = nodeId;
node.type = type;
node.inputSchema = (Schema) (inputSchema != null ? inputSchema.clone() : null);
node.outputSchema = (Schema) (outputSchema != null ? outputSchema.clone() : null);
http://git-wip-us.apache.org/repos/asf/tajo/blob/32be38d4/tajo-plan/src/main/java/org/apache/tajo/plan/logical/NodeType.java
----------------------------------------------------------------------
diff --git a/tajo-plan/src/main/java/org/apache/tajo/plan/logical/NodeType.java b/tajo-plan/src/main/java/org/apache/tajo/plan/logical/NodeType.java
index 9f01de9..75ae3b7 100644
--- a/tajo-plan/src/main/java/org/apache/tajo/plan/logical/NodeType.java
+++ b/tajo-plan/src/main/java/org/apache/tajo/plan/logical/NodeType.java
@@ -32,22 +32,22 @@ public enum NodeType {
EXPRS(EvalExprNode.class),
PROJECTION(ProjectionNode.class),
LIMIT(LimitNode.class),
+ WINDOW_AGG(WindowAggNode.class),
SORT(SortNode.class),
HAVING(HavingNode.class),
+ DISTINCT_GROUP_BY(DistinctGroupbyNode.class),
GROUP_BY(GroupbyNode.class),
- WINDOW_AGG(WindowAggNode.class),
SELECTION(SelectionNode.class),
JOIN(JoinNode.class),
UNION(UnionNode.class),
- EXCEPT(ExceptNode.class),
INTERSECT(IntersectNode.class),
+ EXCEPT(ExceptNode.class),
TABLE_SUBQUERY(TableSubQueryNode.class),
SCAN(ScanNode.class),
PARTITIONS_SCAN(PartitionedTableScanNode.class),
BST_INDEX_SCAN(IndexScanNode.class),
STORE(StoreTableNode.class),
INSERT(InsertNode.class),
- DISTINCT_GROUP_BY(DistinctGroupbyNode.class),
CREATE_DATABASE(CreateDatabaseNode.class),
DROP_DATABASE(DropDatabaseNode.class),
http://git-wip-us.apache.org/repos/asf/tajo/blob/32be38d4/tajo-plan/src/main/java/org/apache/tajo/plan/logical/ProjectionNode.java
----------------------------------------------------------------------
diff --git a/tajo-plan/src/main/java/org/apache/tajo/plan/logical/ProjectionNode.java b/tajo-plan/src/main/java/org/apache/tajo/plan/logical/ProjectionNode.java
index 4ef7e2d..c0b5953 100644
--- a/tajo-plan/src/main/java/org/apache/tajo/plan/logical/ProjectionNode.java
+++ b/tajo-plan/src/main/java/org/apache/tajo/plan/logical/ProjectionNode.java
@@ -25,16 +25,26 @@ import org.apache.tajo.plan.Target;
import org.apache.tajo.util.TUtil;
public class ProjectionNode extends UnaryNode implements Projectable {
+
+ @Expose private boolean distinct = false;
/**
* the targets are always filled even if the query is 'select *'
*/
@Expose private Target [] targets;
- @Expose private boolean distinct = false;
public ProjectionNode(int pid) {
super(pid, NodeType.PROJECTION);
}
+ public void init(boolean distinct, Target [] targets) {
+ this.distinct = distinct;
+ this.targets = targets;
+ }
+
+ public boolean isDistinct() {
+ return distinct;
+ }
+
public boolean hasTargets() {
return this.targets != null;
}
http://git-wip-us.apache.org/repos/asf/tajo/blob/32be38d4/tajo-plan/src/main/java/org/apache/tajo/plan/logical/RelationNode.java
----------------------------------------------------------------------
diff --git a/tajo-plan/src/main/java/org/apache/tajo/plan/logical/RelationNode.java b/tajo-plan/src/main/java/org/apache/tajo/plan/logical/RelationNode.java
index fd8e937..7e335b0 100644
--- a/tajo-plan/src/main/java/org/apache/tajo/plan/logical/RelationNode.java
+++ b/tajo-plan/src/main/java/org/apache/tajo/plan/logical/RelationNode.java
@@ -45,5 +45,5 @@ public abstract class RelationNode extends LogicalNode {
public abstract String getCanonicalName();
- public abstract Schema getTableSchema();
+ public abstract Schema getLogicalSchema();
}
http://git-wip-us.apache.org/repos/asf/tajo/blob/32be38d4/tajo-plan/src/main/java/org/apache/tajo/plan/logical/ScanNode.java
----------------------------------------------------------------------
diff --git a/tajo-plan/src/main/java/org/apache/tajo/plan/logical/ScanNode.java b/tajo-plan/src/main/java/org/apache/tajo/plan/logical/ScanNode.java
index 3e4abe3..a22f592 100644
--- a/tajo-plan/src/main/java/org/apache/tajo/plan/logical/ScanNode.java
+++ b/tajo-plan/src/main/java/org/apache/tajo/plan/logical/ScanNode.java
@@ -42,6 +42,16 @@ public class ScanNode extends RelationNode implements Projectable, SelectableNod
super(pid, nodeType);
}
+ @Override
+ public int childNum() {
+ return 0;
+ }
+
+ @Override
+ public LogicalNode getChild(int idx) {
+ return null;
+ }
+
public ScanNode(int pid) {
super(pid, NodeType.SCAN);
}
@@ -101,8 +111,7 @@ public class ScanNode extends RelationNode implements Projectable, SelectableNod
}
}
- @Override
- public Schema getTableSchema() {
+ public Schema getLogicalSchema() {
return logicalSchema;
}
http://git-wip-us.apache.org/repos/asf/tajo/blob/32be38d4/tajo-plan/src/main/java/org/apache/tajo/plan/logical/SetSessionNode.java
----------------------------------------------------------------------
diff --git a/tajo-plan/src/main/java/org/apache/tajo/plan/logical/SetSessionNode.java b/tajo-plan/src/main/java/org/apache/tajo/plan/logical/SetSessionNode.java
index ba5f83e..117315f 100644
--- a/tajo-plan/src/main/java/org/apache/tajo/plan/logical/SetSessionNode.java
+++ b/tajo-plan/src/main/java/org/apache/tajo/plan/logical/SetSessionNode.java
@@ -19,6 +19,7 @@
package org.apache.tajo.plan.logical;
import com.google.gson.annotations.Expose;
+import org.apache.tajo.exception.UnsupportedException;
import org.apache.tajo.plan.PlanString;
public class SetSessionNode extends LogicalNode {
@@ -29,6 +30,13 @@ public class SetSessionNode extends LogicalNode {
super(pid, NodeType.SET_SESSION);
}
+ /**
+ * If both name and value are given, it will set a session variable.
+ * If a name is only given, it will unset a session variable.
+ *
+ * @param name Session variable name
+ * @param value Session variable value
+ */
public void init(String name, String value) {
this.name = name;
this.value = value;
@@ -38,8 +46,8 @@ public class SetSessionNode extends LogicalNode {
return name;
}
- public boolean isDefaultValue() {
- return value == null;
+ public boolean hasValue() {
+ return value != null;
}
public String getValue() {
@@ -47,6 +55,16 @@ public class SetSessionNode extends LogicalNode {
}
@Override
+ public int childNum() {
+ return 0;
+ }
+
+ @Override
+ public LogicalNode getChild(int idx) {
+ throw new UnsupportedException();
+ }
+
+ @Override
public void preOrder(LogicalNodeVisitor visitor) {
visitor.visit(this);
}
http://git-wip-us.apache.org/repos/asf/tajo/blob/32be38d4/tajo-plan/src/main/java/org/apache/tajo/plan/logical/StoreTableNode.java
----------------------------------------------------------------------
diff --git a/tajo-plan/src/main/java/org/apache/tajo/plan/logical/StoreTableNode.java b/tajo-plan/src/main/java/org/apache/tajo/plan/logical/StoreTableNode.java
index 0623d21..730eb35 100644
--- a/tajo-plan/src/main/java/org/apache/tajo/plan/logical/StoreTableNode.java
+++ b/tajo-plan/src/main/java/org/apache/tajo/plan/logical/StoreTableNode.java
@@ -39,6 +39,15 @@ public class StoreTableNode extends PersistentStoreNode implements Cloneable {
return tableName != null;
}
+ /**
+ * Check if a table name is specified.
+ *
+ * @return FALSE if this node is used for 'INSERT INTO LOCATION'. Otherwise, it will be TRUE.
+ */
+ public boolean hasTableName() {
+ return tableName != null;
+ }
+
public void setTableName(String tableName) {
this.tableName = tableName;
}
@@ -73,7 +82,7 @@ public class StoreTableNode extends PersistentStoreNode implements Cloneable {
if (obj instanceof StoreTableNode) {
StoreTableNode other = (StoreTableNode) obj;
boolean eq = super.equals(other);
- eq = eq && this.tableName.equals(other.tableName);
+ eq = eq && TUtil.checkEquals(this.tableName, other.tableName);
eq = eq && TUtil.checkEquals(partitionDesc, other.partitionDesc);
return eq;
} else {
http://git-wip-us.apache.org/repos/asf/tajo/blob/32be38d4/tajo-plan/src/main/java/org/apache/tajo/plan/logical/TableSubQueryNode.java
----------------------------------------------------------------------
diff --git a/tajo-plan/src/main/java/org/apache/tajo/plan/logical/TableSubQueryNode.java b/tajo-plan/src/main/java/org/apache/tajo/plan/logical/TableSubQueryNode.java
index 4e5f41c..4e9bd5c 100644
--- a/tajo-plan/src/main/java/org/apache/tajo/plan/logical/TableSubQueryNode.java
+++ b/tajo-plan/src/main/java/org/apache/tajo/plan/logical/TableSubQueryNode.java
@@ -35,6 +35,16 @@ public class TableSubQueryNode extends RelationNode implements Projectable {
super(pid, NodeType.TABLE_SUBQUERY);
}
+ @Override
+ public int childNum() {
+ return 1;
+ }
+
+ @Override
+ public LogicalNode getChild(int idx) {
+ return subQuery;
+ }
+
public void init(String tableName, LogicalNode subQuery) {
this.tableName = tableName;
if (subQuery != null) {
@@ -66,7 +76,7 @@ public class TableSubQueryNode extends RelationNode implements Projectable {
}
@Override
- public Schema getTableSchema() {
+ public Schema getLogicalSchema() {
// an output schema can be determined by targets. So, an input schema of
// TableSubQueryNode is only eligible for table schema.
//
http://git-wip-us.apache.org/repos/asf/tajo/blob/32be38d4/tajo-plan/src/main/java/org/apache/tajo/plan/logical/TruncateTableNode.java
----------------------------------------------------------------------
diff --git a/tajo-plan/src/main/java/org/apache/tajo/plan/logical/TruncateTableNode.java b/tajo-plan/src/main/java/org/apache/tajo/plan/logical/TruncateTableNode.java
index 10c65b6..0166ef8 100644
--- a/tajo-plan/src/main/java/org/apache/tajo/plan/logical/TruncateTableNode.java
+++ b/tajo-plan/src/main/java/org/apache/tajo/plan/logical/TruncateTableNode.java
@@ -32,6 +32,16 @@ public class TruncateTableNode extends LogicalNode {
super(pid, NodeType.TRUNCATE_TABLE);
}
+ @Override
+ public int childNum() {
+ return 0;
+ }
+
+ @Override
+ public LogicalNode getChild(int idx) {
+ return null;
+ }
+
public List<String> getTableNames() {
return tableNames;
}
http://git-wip-us.apache.org/repos/asf/tajo/blob/32be38d4/tajo-plan/src/main/java/org/apache/tajo/plan/logical/UnaryNode.java
----------------------------------------------------------------------
diff --git a/tajo-plan/src/main/java/org/apache/tajo/plan/logical/UnaryNode.java b/tajo-plan/src/main/java/org/apache/tajo/plan/logical/UnaryNode.java
index 0fc5c37..16a7f1b 100644
--- a/tajo-plan/src/main/java/org/apache/tajo/plan/logical/UnaryNode.java
+++ b/tajo-plan/src/main/java/org/apache/tajo/plan/logical/UnaryNode.java
@@ -31,6 +31,20 @@ public abstract class UnaryNode extends LogicalNode implements Cloneable {
public UnaryNode(int pid, NodeType type) {
super(pid, type);
}
+
+ @Override
+ public int childNum() {
+ return 1;
+ }
+
+ @Override
+ public LogicalNode getChild(int idx) {
+ if (idx == 0) {
+ return child;
+ } else {
+ throw new ArrayIndexOutOfBoundsException(idx);
+ }
+ }
public void setChild(LogicalNode subNode) {
this.child = subNode;
http://git-wip-us.apache.org/repos/asf/tajo/blob/32be38d4/tajo-plan/src/main/java/org/apache/tajo/plan/logical/WindowSpec.java
----------------------------------------------------------------------
diff --git a/tajo-plan/src/main/java/org/apache/tajo/plan/logical/WindowSpec.java b/tajo-plan/src/main/java/org/apache/tajo/plan/logical/WindowSpec.java
index 73f4e13..cdae68f 100644
--- a/tajo-plan/src/main/java/org/apache/tajo/plan/logical/WindowSpec.java
+++ b/tajo-plan/src/main/java/org/apache/tajo/plan/logical/WindowSpec.java
@@ -73,7 +73,7 @@ public class WindowSpec {
return Objects.hashCode(windowName, partitionKeys, windowFrame);
}
- public static class WindowFrame {
+ public static class WindowFrame implements Cloneable {
@Expose private WindowStartBound startBound;
@Expose private WindowEndBound endBound;
@Expose org.apache.tajo.algebra.WindowSpec.WindowFrameUnit unit; // TODO - to be supported
@@ -83,12 +83,8 @@ public class WindowSpec {
this.endBound = new WindowEndBound(WindowFrameEndBoundType.UNBOUNDED_FOLLOWING);
}
- public WindowFrame(WindowStartBound startBound) {
- this.startBound = startBound;
- }
-
public WindowFrame(WindowStartBound startBound, WindowEndBound endBound) {
- this(startBound);
+ this.startBound = startBound;
this.endBound = endBound;
}
@@ -120,21 +116,29 @@ public class WindowSpec {
public boolean equals(Object obj) {
if (obj instanceof WindowFrame) {
WindowFrame another = (WindowFrame) obj;
- return
- TUtil.checkEquals(startBound, another.startBound) &&
- TUtil.checkEquals(endBound, another.endBound) &&
- TUtil.checkEquals(unit, another.unit);
+ boolean eq = TUtil.checkEquals(startBound, another.startBound);
+ eq &= TUtil.checkEquals(endBound, another.endBound);
+ eq &= TUtil.checkEquals(unit, another.unit);
+ return eq;
} else {
return false;
}
}
+ public WindowFrame clone() throws CloneNotSupportedException {
+ WindowFrame newFrame = (WindowFrame) super.clone();
+ newFrame.startBound = startBound.clone();
+ newFrame.endBound = endBound.clone();
+ newFrame.unit = unit;
+ return newFrame;
+ }
+
public int hashCode() {
return Objects.hashCode(startBound, endBound, unit);
}
}
- public static class WindowStartBound {
+ public static class WindowStartBound implements Cloneable {
@Expose private WindowFrameStartBoundType boundType;
@Expose private EvalNode number;
@@ -158,7 +162,9 @@ public class WindowSpec {
public boolean equals(Object obj) {
if (obj instanceof WindowStartBound) {
WindowStartBound other = (WindowStartBound) obj;
- return boundType == other.boundType && number.equals(other.number);
+ boolean eq = boundType == other.boundType;
+ eq &= TUtil.checkEquals(number, other.number);
+ return eq;
} else {
return false;
}
@@ -168,9 +174,19 @@ public class WindowSpec {
public int hashCode() {
return Objects.hashCode(boundType, number);
}
+
+ @Override
+ public WindowStartBound clone() throws CloneNotSupportedException {
+ WindowStartBound newStartBound = (WindowStartBound) super.clone();
+ newStartBound.boundType = boundType;
+ if (number != null) {
+ newStartBound.number = (EvalNode) number.clone();
+ }
+ return newStartBound;
+ }
}
- public static class WindowEndBound {
+ public static class WindowEndBound implements Cloneable {
@Expose private WindowFrameEndBoundType boundType;
@Expose private EvalNode number;
@@ -192,9 +208,11 @@ public class WindowSpec {
@Override
public boolean equals(Object obj) {
- if (obj instanceof WindowStartBound) {
+ if (obj instanceof WindowEndBound) {
WindowEndBound other = (WindowEndBound) obj;
- return boundType == other.boundType && number.equals(other.number);
+ boolean eq = boundType == other.boundType;
+ eq &= TUtil.checkEquals(number, other.number);
+ return eq;
} else {
return false;
}
@@ -204,5 +222,14 @@ public class WindowSpec {
public int hashCode() {
return Objects.hashCode(boundType, number);
}
+
+ public WindowEndBound clone() throws CloneNotSupportedException {
+ WindowEndBound newEndBound = (WindowEndBound) super.clone();
+ newEndBound.boundType = boundType;
+ if (number != null) {
+ newEndBound.number = (EvalNode) number.clone();
+ }
+ return newEndBound;
+ }
}
}
http://git-wip-us.apache.org/repos/asf/tajo/blob/32be38d4/tajo-plan/src/main/java/org/apache/tajo/plan/nameresolver/NameResolver.java
----------------------------------------------------------------------
diff --git a/tajo-plan/src/main/java/org/apache/tajo/plan/nameresolver/NameResolver.java b/tajo-plan/src/main/java/org/apache/tajo/plan/nameresolver/NameResolver.java
index 51a016f..44d3263 100644
--- a/tajo-plan/src/main/java/org/apache/tajo/plan/nameresolver/NameResolver.java
+++ b/tajo-plan/src/main/java/org/apache/tajo/plan/nameresolver/NameResolver.java
@@ -128,7 +128,7 @@ public abstract class NameResolver {
CatalogUtil.buildFQName(relationOp.getCanonicalName(), CatalogUtil.extractSimpleName(canonicalName));
}
- Schema schema = relationOp.getTableSchema();
+ Schema schema = relationOp.getLogicalSchema();
Column column = schema.getColumn(canonicalName);
return column;
@@ -173,7 +173,7 @@ public abstract class NameResolver {
List<Column> candidates = TUtil.newList();
for (RelationNode rel : block.getRelations()) {
- Column found = rel.getTableSchema().getColumn(columnRef.getName());
+ Column found = rel.getLogicalSchema().getColumn(columnRef.getName());
if (found != null) {
candidates.add(found);
}
@@ -201,7 +201,7 @@ public abstract class NameResolver {
for (LogicalPlan.QueryBlock eachBlock : plan.getQueryBlocks()) {
for (RelationNode rel : eachBlock.getRelations()) {
- Column found = rel.getTableSchema().getColumn(columnRef.getName());
+ Column found = rel.getLogicalSchema().getColumn(columnRef.getName());
if (found != null) {
candidates.add(found);
}
http://git-wip-us.apache.org/repos/asf/tajo/blob/32be38d4/tajo-plan/src/main/java/org/apache/tajo/plan/nameresolver/ResolverByLegacy.java
----------------------------------------------------------------------
diff --git a/tajo-plan/src/main/java/org/apache/tajo/plan/nameresolver/ResolverByLegacy.java b/tajo-plan/src/main/java/org/apache/tajo/plan/nameresolver/ResolverByLegacy.java
index a1d9dbd..19f39dd 100644
--- a/tajo-plan/src/main/java/org/apache/tajo/plan/nameresolver/ResolverByLegacy.java
+++ b/tajo-plan/src/main/java/org/apache/tajo/plan/nameresolver/ResolverByLegacy.java
@@ -74,7 +74,7 @@ public class ResolverByLegacy extends NameResolver {
Schema currentNodeSchema = null;
if (currentNode != null) {
if (currentNode instanceof RelationNode) {
- currentNodeSchema = ((RelationNode) currentNode).getTableSchema();
+ currentNodeSchema = ((RelationNode) currentNode).getLogicalSchema();
} else {
currentNodeSchema = currentNode.getInSchema();
}
http://git-wip-us.apache.org/repos/asf/tajo/blob/32be38d4/tajo-plan/src/main/java/org/apache/tajo/plan/rewrite/BaseLogicalPlanRewriteEngine.java
----------------------------------------------------------------------
diff --git a/tajo-plan/src/main/java/org/apache/tajo/plan/rewrite/BaseLogicalPlanRewriteEngine.java b/tajo-plan/src/main/java/org/apache/tajo/plan/rewrite/BaseLogicalPlanRewriteEngine.java
new file mode 100644
index 0000000..19c254b
--- /dev/null
+++ b/tajo-plan/src/main/java/org/apache/tajo/plan/rewrite/BaseLogicalPlanRewriteEngine.java
@@ -0,0 +1,89 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.tajo.plan.rewrite;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.tajo.OverridableConf;
+import org.apache.tajo.plan.LogicalPlan;
+import org.apache.tajo.plan.PlanningException;
+
+import java.util.LinkedHashMap;
+import java.util.Map;
+import java.util.Map.Entry;
+
+/**
+ * This is a basic query rewrite rule engine. This rewrite rule engine
+ * rewrites a logical plan with various query rewrite rules.
+ */
+public class BaseLogicalPlanRewriteEngine implements LogicalPlanRewriteEngine {
+ /** class logger */
+ private Log LOG = LogFactory.getLog(BaseLogicalPlanRewriteEngine.class);
+
+ /** a map for query rewrite rules */
+ private Map<String, LogicalPlanRewriteRule> rewriteRules = new LinkedHashMap<String, LogicalPlanRewriteRule>();
+
+ /**
+ * Add a query rewrite rule to this engine.
+ *
+ * @param rules Rule classes
+ */
+ public void addRewriteRule(Iterable<Class<? extends LogicalPlanRewriteRule>> rules) {
+ for (Class<? extends LogicalPlanRewriteRule> clazz : rules) {
+ try {
+ LogicalPlanRewriteRule rule = clazz.newInstance();
+ addRewriteRule(rule);
+ } catch (Throwable t) {
+ throw new RuntimeException(t);
+ }
+ }
+ }
+
+ /**
+ * Add a query rewrite rule to this engine.
+ *
+ * @param rule The rule to be added to this engine.
+ */
+ public void addRewriteRule(LogicalPlanRewriteRule rule) {
+ if (!rewriteRules.containsKey(rule.getName())) {
+ rewriteRules.put(rule.getName(), rule);
+ }
+ }
+
+ /**
+ * Rewrite a logical plan with all query rewrite rules added to this engine.
+ *
+ * @param plan The plan to be rewritten with all query rewrite rules.
+ * @return The rewritten plan.
+ */
+ public LogicalPlan rewrite(OverridableConf queryContext, LogicalPlan plan) throws PlanningException {
+ LogicalPlanRewriteRule rule;
+ for (Entry<String, LogicalPlanRewriteRule> rewriteRule : rewriteRules.entrySet()) {
+ rule = rewriteRule.getValue();
+ if (rule.isEligible(queryContext, plan)) {
+ plan = rule.rewrite(queryContext, plan);
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("The rule \"" + rule.getName() + " \" rewrites the query.");
+ }
+ }
+ }
+
+ return plan;
+ }
+}
http://git-wip-us.apache.org/repos/asf/tajo/blob/32be38d4/tajo-plan/src/main/java/org/apache/tajo/plan/rewrite/BaseLogicalPlanRewriteRuleProvider.java
----------------------------------------------------------------------
diff --git a/tajo-plan/src/main/java/org/apache/tajo/plan/rewrite/BaseLogicalPlanRewriteRuleProvider.java b/tajo-plan/src/main/java/org/apache/tajo/plan/rewrite/BaseLogicalPlanRewriteRuleProvider.java
new file mode 100644
index 0000000..eb96149
--- /dev/null
+++ b/tajo-plan/src/main/java/org/apache/tajo/plan/rewrite/BaseLogicalPlanRewriteRuleProvider.java
@@ -0,0 +1,59 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.tajo.plan.rewrite;
+
+import org.apache.tajo.conf.TajoConf;
+import org.apache.tajo.plan.rewrite.rules.FilterPushDownRule;
+import org.apache.tajo.plan.rewrite.rules.PartitionedTableRewriter;
+import org.apache.tajo.plan.rewrite.rules.ProjectionPushDownRule;
+import org.apache.tajo.util.TUtil;
+
+import java.util.Collection;
+import java.util.List;
+
+/**
+ * Default RewriteRuleProvider
+ */
+@SuppressWarnings("unused")
+public class BaseLogicalPlanRewriteRuleProvider extends LogicalPlanRewriteRuleProvider {
+
+ public BaseLogicalPlanRewriteRuleProvider(TajoConf conf) {
+ super(conf);
+ }
+
+ @Override
+ public Collection<Class<? extends LogicalPlanRewriteRule>> getPreRules() {
+ List<Class<? extends LogicalPlanRewriteRule>> rules = TUtil.newList();
+
+ if (systemConf.getBoolVar(TajoConf.ConfVars.$TEST_FILTER_PUSHDOWN_ENABLED)) {
+ rules.add(FilterPushDownRule.class);
+ }
+
+ return rules;
+ }
+
+ @Override
+ public Collection<Class<? extends LogicalPlanRewriteRule>> getPostRules() {
+ List<Class<? extends LogicalPlanRewriteRule>> rules = TUtil.newList(
+ ProjectionPushDownRule.class,
+ PartitionedTableRewriter.class
+ );
+ return rules;
+ }
+}
http://git-wip-us.apache.org/repos/asf/tajo/blob/32be38d4/tajo-plan/src/main/java/org/apache/tajo/plan/rewrite/BasicQueryRewriteEngine.java
----------------------------------------------------------------------
diff --git a/tajo-plan/src/main/java/org/apache/tajo/plan/rewrite/BasicQueryRewriteEngine.java b/tajo-plan/src/main/java/org/apache/tajo/plan/rewrite/BasicQueryRewriteEngine.java
deleted file mode 100644
index 491dda1..0000000
--- a/tajo-plan/src/main/java/org/apache/tajo/plan/rewrite/BasicQueryRewriteEngine.java
+++ /dev/null
@@ -1,72 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.tajo.plan.rewrite;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.tajo.plan.LogicalPlan;
-import org.apache.tajo.plan.PlanningException;
-
-import java.util.LinkedHashMap;
-import java.util.Map;
-import java.util.Map.Entry;
-
-/**
- * This is a basic query rewrite rule engine. This rewrite rule engine
- * rewrites a logical plan with various query rewrite rules.
- */
-public class BasicQueryRewriteEngine implements QueryRewriteEngine {
- /** class logger */
- private Log LOG = LogFactory.getLog(BasicQueryRewriteEngine.class);
-
- /** a map for query rewrite rules */
- private Map<String, RewriteRule> rewriteRules = new LinkedHashMap<String, RewriteRule>();
-
- /**
- * Add a query rewrite rule to this engine.
- *
- * @param rule The rule to be added to this engine.
- */
- public void addRewriteRule(RewriteRule rule) {
- if (!rewriteRules.containsKey(rule.getName())) {
- rewriteRules.put(rule.getName(), rule);
- }
- }
-
- /**
- * Rewrite a logical plan with all query rewrite rules added to this engine.
- *
- * @param plan The plan to be rewritten with all query rewrite rule.
- * @return The rewritten plan.
- */
- public LogicalPlan rewrite(LogicalPlan plan) throws PlanningException {
- RewriteRule rule;
- for (Entry<String, RewriteRule> rewriteRule : rewriteRules.entrySet()) {
- rule = rewriteRule.getValue();
- if (rule.isEligible(plan)) {
- plan = rule.rewrite(plan);
- if (LOG.isDebugEnabled()) {
- LOG.debug("The rule \"" + rule.getName() + " \" rewrites the query.");
- }
- }
- }
-
- return plan;
- }
-}
http://git-wip-us.apache.org/repos/asf/tajo/blob/32be38d4/tajo-plan/src/main/java/org/apache/tajo/plan/rewrite/LogicalPlanRewriteEngine.java
----------------------------------------------------------------------
diff --git a/tajo-plan/src/main/java/org/apache/tajo/plan/rewrite/LogicalPlanRewriteEngine.java b/tajo-plan/src/main/java/org/apache/tajo/plan/rewrite/LogicalPlanRewriteEngine.java
new file mode 100644
index 0000000..267d651
--- /dev/null
+++ b/tajo-plan/src/main/java/org/apache/tajo/plan/rewrite/LogicalPlanRewriteEngine.java
@@ -0,0 +1,33 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.tajo.plan.rewrite;
+
+import org.apache.tajo.OverridableConf;
+import org.apache.tajo.plan.LogicalPlan;
+import org.apache.tajo.plan.PlanningException;
+
+public interface LogicalPlanRewriteEngine {
+ /**
+ * Rewrite a logical plan with all query rewrite rules added to this engine.
+ *
+ * @param plan The plan to be rewritten with all query rewrite rules.
+ * @return The rewritten plan.
+ */
+ LogicalPlan rewrite(OverridableConf queryContext, LogicalPlan plan) throws PlanningException;
+}
http://git-wip-us.apache.org/repos/asf/tajo/blob/32be38d4/tajo-plan/src/main/java/org/apache/tajo/plan/rewrite/LogicalPlanRewriteRule.java
----------------------------------------------------------------------
diff --git a/tajo-plan/src/main/java/org/apache/tajo/plan/rewrite/LogicalPlanRewriteRule.java b/tajo-plan/src/main/java/org/apache/tajo/plan/rewrite/LogicalPlanRewriteRule.java
new file mode 100644
index 0000000..2f0652b
--- /dev/null
+++ b/tajo-plan/src/main/java/org/apache/tajo/plan/rewrite/LogicalPlanRewriteRule.java
@@ -0,0 +1,57 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.tajo.plan.rewrite;
+
+import org.apache.tajo.OverridableConf;
+import org.apache.tajo.plan.LogicalPlan;
+import org.apache.tajo.plan.PlanningException;
+
+/**
+ * An interface for a rewrite rule.
+ */
+public interface LogicalPlanRewriteRule {
+
+ /**
+ * It returns the rewrite rule name. It will be used for debugging and
+ * building an optimization history.
+ *
+ * @return The rewrite rule name
+ */
+ String getName();
+
+ /**
+ * This method checks if this rewrite rule can be applied to a given query plan.
+ * For example, the selection push down can not be applied to the query plan without any filter.
+ * In such case, it will return false.
+ *
+ * @param plan The plan to be checked
+ * @return True if this rule can be applied to a given plan. Otherwise, false.
+ */
+ boolean isEligible(OverridableConf queryContext, LogicalPlan plan);
+
+ /**
+ * Updates a logical plan and returns an updated logical plan rewritten by this rule.
+ * It must be guaranteed that the input logical plan is not modified even after rewrite.
+ * In other words, the rewrite has to modify a plan copied from the input plan.
+ *
+ * @param plan Input logical plan. It will not be modified.
+ * @return The rewritten logical plan.
+ */
+ LogicalPlan rewrite(OverridableConf queryContext, LogicalPlan plan) throws PlanningException;
+}
http://git-wip-us.apache.org/repos/asf/tajo/blob/32be38d4/tajo-plan/src/main/java/org/apache/tajo/plan/rewrite/LogicalPlanRewriteRuleProvider.java
----------------------------------------------------------------------
diff --git a/tajo-plan/src/main/java/org/apache/tajo/plan/rewrite/LogicalPlanRewriteRuleProvider.java b/tajo-plan/src/main/java/org/apache/tajo/plan/rewrite/LogicalPlanRewriteRuleProvider.java
new file mode 100644
index 0000000..934549e
--- /dev/null
+++ b/tajo-plan/src/main/java/org/apache/tajo/plan/rewrite/LogicalPlanRewriteRuleProvider.java
@@ -0,0 +1,44 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.tajo.plan.rewrite;
+
+import org.apache.tajo.conf.TajoConf;
+
+import java.util.Collection;
+
+public abstract class LogicalPlanRewriteRuleProvider {
+ protected final TajoConf systemConf;
+
+ public LogicalPlanRewriteRuleProvider(TajoConf systemConf) {
+ this.systemConf = systemConf;
+ }
+
+ /**
+ * It returns RewriteRule classes which should be executed before join ordering.
+ *
+ * @return RewriteRule classes
+ */
+ public abstract Collection<Class<? extends LogicalPlanRewriteRule>> getPreRules();
+ /**
+ * It returns RewriteRule classes which should be executed after join ordering.
+ *
+ * @return RewriteRule classes
+ */
+ public abstract Collection<Class<? extends LogicalPlanRewriteRule>> getPostRules();
+}
http://git-wip-us.apache.org/repos/asf/tajo/blob/32be38d4/tajo-plan/src/main/java/org/apache/tajo/plan/rewrite/LogicalPlanTestRuleProvider.java
----------------------------------------------------------------------
diff --git a/tajo-plan/src/main/java/org/apache/tajo/plan/rewrite/LogicalPlanTestRuleProvider.java b/tajo-plan/src/main/java/org/apache/tajo/plan/rewrite/LogicalPlanTestRuleProvider.java
new file mode 100644
index 0000000..704e7ed
--- /dev/null
+++ b/tajo-plan/src/main/java/org/apache/tajo/plan/rewrite/LogicalPlanTestRuleProvider.java
@@ -0,0 +1,44 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.tajo.plan.rewrite;
+
+import com.google.common.collect.Lists;
+import org.apache.tajo.conf.TajoConf;
+import org.apache.tajo.plan.rewrite.rules.LogicalPlanEqualityTester;
+
+import java.util.Collection;
+import java.util.List;
+
+/**
+ * It is used only for testing.
+ */
+@SuppressWarnings("unused")
+public class LogicalPlanTestRuleProvider extends BaseLogicalPlanRewriteRuleProvider {
+
+ public LogicalPlanTestRuleProvider(TajoConf conf) {
+ super(conf);
+ }
+
+ @Override
+ public Collection<Class<? extends LogicalPlanRewriteRule>> getPostRules() {
+ List<Class<? extends LogicalPlanRewriteRule>> injectedRules = Lists.newArrayList(super.getPostRules());
+ injectedRules.add(LogicalPlanEqualityTester.class);
+ return injectedRules;
+ }
+}
http://git-wip-us.apache.org/repos/asf/tajo/blob/32be38d4/tajo-plan/src/main/java/org/apache/tajo/plan/rewrite/QueryRewriteEngine.java
----------------------------------------------------------------------
diff --git a/tajo-plan/src/main/java/org/apache/tajo/plan/rewrite/QueryRewriteEngine.java b/tajo-plan/src/main/java/org/apache/tajo/plan/rewrite/QueryRewriteEngine.java
deleted file mode 100644
index b7f5637..0000000
--- a/tajo-plan/src/main/java/org/apache/tajo/plan/rewrite/QueryRewriteEngine.java
+++ /dev/null
@@ -1,32 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.tajo.plan.rewrite;
-
-import org.apache.tajo.plan.LogicalPlan;
-import org.apache.tajo.plan.PlanningException;
-
-public interface QueryRewriteEngine {
- /**
- * Rewrite a logical plan with all query rewrite rules added to this engine.
- *
- * @param plan The plan to be rewritten with all query rewrite rule.
- * @return The rewritten plan.
- */
- LogicalPlan rewrite(LogicalPlan plan) throws PlanningException;
-}
[2/8] tajo git commit: TAJO-269: Protocol buffer De/Serialization for
LogicalNode.
Posted by ji...@apache.org.
http://git-wip-us.apache.org/repos/asf/tajo/blob/32be38d4/tajo-plan/src/main/java/org/apache/tajo/plan/rewrite/RewriteRule.java
----------------------------------------------------------------------
diff --git a/tajo-plan/src/main/java/org/apache/tajo/plan/rewrite/RewriteRule.java b/tajo-plan/src/main/java/org/apache/tajo/plan/rewrite/RewriteRule.java
deleted file mode 100644
index 0ba7460..0000000
--- a/tajo-plan/src/main/java/org/apache/tajo/plan/rewrite/RewriteRule.java
+++ /dev/null
@@ -1,56 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.tajo.plan.rewrite;
-
-import org.apache.tajo.plan.LogicalPlan;
-import org.apache.tajo.plan.PlanningException;
-
-/**
- * An interface for a rewrite rule.
- */
-public interface RewriteRule {
-
- /**
- * It returns the rewrite rule name. It will be used for debugging and
- * building a optimization history.
- *
- * @return The rewrite rule name
- */
- String getName();
-
- /**
- * This method checks if this rewrite rule can be applied to a given query plan.
- * For example, the selection push down can not be applied to the query plan without any filter.
- * In such case, it will return false.
- *
- * @param plan The plan to be checked
- * @return True if this rule can be applied to a given plan. Otherwise, false.
- */
- boolean isEligible(LogicalPlan plan);
-
- /**
- * Updates a logical plan and returns an updated logical plan rewritten by this rule.
- * It must be guaranteed that the input logical plan is not modified even after rewrite.
- * In other words, the rewrite has to modify an plan copied from the input plan.
- *
- * @param plan Input logical plan. It will not be modified.
- * @return The rewritten logical plan.
- */
- LogicalPlan rewrite(LogicalPlan plan) throws PlanningException;
-}
http://git-wip-us.apache.org/repos/asf/tajo/blob/32be38d4/tajo-plan/src/main/java/org/apache/tajo/plan/rewrite/rules/FilterPushDownRule.java
----------------------------------------------------------------------
diff --git a/tajo-plan/src/main/java/org/apache/tajo/plan/rewrite/rules/FilterPushDownRule.java b/tajo-plan/src/main/java/org/apache/tajo/plan/rewrite/rules/FilterPushDownRule.java
index ed410f9..15750a1 100644
--- a/tajo-plan/src/main/java/org/apache/tajo/plan/rewrite/rules/FilterPushDownRule.java
+++ b/tajo-plan/src/main/java/org/apache/tajo/plan/rewrite/rules/FilterPushDownRule.java
@@ -21,6 +21,7 @@ package org.apache.tajo.plan.rewrite.rules;
import com.google.common.collect.*;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
+import org.apache.tajo.OverridableConf;
import org.apache.tajo.algebra.JoinType;
import org.apache.tajo.catalog.CatalogUtil;
import org.apache.tajo.catalog.Column;
@@ -30,7 +31,7 @@ import org.apache.tajo.plan.*;
import org.apache.tajo.plan.expr.*;
import org.apache.tajo.plan.logical.*;
import org.apache.tajo.plan.rewrite.rules.FilterPushDownRule.FilterPushDownContext;
-import org.apache.tajo.plan.rewrite.RewriteRule;
+import org.apache.tajo.plan.rewrite.LogicalPlanRewriteRule;
import org.apache.tajo.plan.util.PlannerUtil;
import org.apache.tajo.plan.visitor.BasicLogicalPlanVisitor;
import org.apache.tajo.util.TUtil;
@@ -42,7 +43,7 @@ import java.util.*;
* It is likely to significantly reduces the intermediate data.
*/
public class FilterPushDownRule extends BasicLogicalPlanVisitor<FilterPushDownContext, LogicalNode>
- implements RewriteRule {
+ implements LogicalPlanRewriteRule {
private final static Log LOG = LogFactory.getLog(FilterPushDownRule.class);
private static final String NAME = "FilterPushDown";
@@ -79,7 +80,7 @@ public class FilterPushDownRule extends BasicLogicalPlanVisitor<FilterPushDownCo
}
@Override
- public boolean isEligible(LogicalPlan plan) {
+ public boolean isEligible(OverridableConf queryContext, LogicalPlan plan) {
for (LogicalPlan.QueryBlock block : plan.getQueryBlocks()) {
if (block.hasNode(NodeType.SELECTION) || block.hasNode(NodeType.JOIN)) {
return true;
@@ -89,7 +90,7 @@ public class FilterPushDownRule extends BasicLogicalPlanVisitor<FilterPushDownCo
}
@Override
- public LogicalPlan rewrite(LogicalPlan plan) throws PlanningException {
+ public LogicalPlan rewrite(OverridableConf queryContext, LogicalPlan plan) throws PlanningException {
/*
FilterPushDown rule: processing when visits each node
- If a target which is corresponding on a filter EvalNode's column is not FieldEval, do not PushDown.
http://git-wip-us.apache.org/repos/asf/tajo/blob/32be38d4/tajo-plan/src/main/java/org/apache/tajo/plan/rewrite/rules/LogicalPlanEqualityTester.java
----------------------------------------------------------------------
diff --git a/tajo-plan/src/main/java/org/apache/tajo/plan/rewrite/rules/LogicalPlanEqualityTester.java b/tajo-plan/src/main/java/org/apache/tajo/plan/rewrite/rules/LogicalPlanEqualityTester.java
new file mode 100644
index 0000000..8a24add
--- /dev/null
+++ b/tajo-plan/src/main/java/org/apache/tajo/plan/rewrite/rules/LogicalPlanEqualityTester.java
@@ -0,0 +1,55 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.tajo.plan.rewrite.rules;
+
+import org.apache.tajo.OverridableConf;
+import org.apache.tajo.plan.LogicalPlan;
+import org.apache.tajo.plan.PlanningException;
+import org.apache.tajo.plan.logical.LogicalNode;
+import org.apache.tajo.plan.rewrite.LogicalPlanRewriteRule;
+import org.apache.tajo.plan.serder.LogicalNodeDeserializer;
+import org.apache.tajo.plan.serder.LogicalNodeSerializer;
+import org.apache.tajo.plan.serder.PlanProto;
+
+/**
+ * It verifies the equality between the input and output of LogicalNodeTree(De)Serializer in logical planning.
+ * It is used only for testing.
+ */
+@SuppressWarnings("unused")
+public class LogicalPlanEqualityTester implements LogicalPlanRewriteRule {
+
+ @Override
+ public String getName() {
+ return "LogicalPlanEqualityTester";
+ }
+
+ @Override
+ public boolean isEligible(OverridableConf queryContext, LogicalPlan plan) {
+ return true;
+ }
+
+ @Override
+ public LogicalPlan rewrite(OverridableConf queryContext, LogicalPlan plan) throws PlanningException {
+ LogicalNode root = plan.getRootBlock().getRoot();
+ PlanProto.LogicalNodeTree serialized = LogicalNodeSerializer.serialize(plan.getRootBlock().getRoot());
+ LogicalNode deserialized = LogicalNodeDeserializer.deserialize(queryContext, serialized);
+ assert root.deepEquals(deserialized);
+ return plan;
+ }
+}
http://git-wip-us.apache.org/repos/asf/tajo/blob/32be38d4/tajo-plan/src/main/java/org/apache/tajo/plan/rewrite/rules/PartitionedTableRewriter.java
----------------------------------------------------------------------
diff --git a/tajo-plan/src/main/java/org/apache/tajo/plan/rewrite/rules/PartitionedTableRewriter.java b/tajo-plan/src/main/java/org/apache/tajo/plan/rewrite/rules/PartitionedTableRewriter.java
index ea58437..7604c53 100644
--- a/tajo-plan/src/main/java/org/apache/tajo/plan/rewrite/rules/PartitionedTableRewriter.java
+++ b/tajo-plan/src/main/java/org/apache/tajo/plan/rewrite/rules/PartitionedTableRewriter.java
@@ -23,15 +23,15 @@ import com.google.common.collect.Sets;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.*;
+import org.apache.tajo.OverridableConf;
import org.apache.tajo.catalog.Column;
import org.apache.tajo.catalog.Schema;
import org.apache.tajo.catalog.TableDesc;
import org.apache.tajo.catalog.partition.PartitionMethodDesc;
-import org.apache.tajo.conf.TajoConf;
import org.apache.tajo.datum.DatumFactory;
import org.apache.tajo.datum.NullDatum;
import org.apache.tajo.plan.LogicalPlan;
-import org.apache.tajo.plan.rewrite.RewriteRule;
+import org.apache.tajo.plan.rewrite.LogicalPlanRewriteRule;
import org.apache.tajo.plan.util.PlannerUtil;
import org.apache.tajo.plan.PlanningException;
import org.apache.tajo.plan.expr.*;
@@ -46,25 +46,19 @@ import java.util.List;
import java.util.Set;
import java.util.Stack;
-public class PartitionedTableRewriter implements RewriteRule {
+public class PartitionedTableRewriter implements LogicalPlanRewriteRule {
private static final Log LOG = LogFactory.getLog(PartitionedTableRewriter.class);
private static final String NAME = "Partitioned Table Rewriter";
private final Rewriter rewriter = new Rewriter();
- private final TajoConf systemConf;
-
- public PartitionedTableRewriter(TajoConf conf) {
- systemConf = conf;
- }
-
@Override
public String getName() {
return NAME;
}
@Override
- public boolean isEligible(LogicalPlan plan) {
+ public boolean isEligible(OverridableConf queryContext, LogicalPlan plan) {
for (LogicalPlan.QueryBlock block : plan.getQueryBlocks()) {
for (RelationNode relation : block.getRelations()) {
if (relation.getType() == NodeType.SCAN) {
@@ -79,9 +73,9 @@ public class PartitionedTableRewriter implements RewriteRule {
}
@Override
- public LogicalPlan rewrite(LogicalPlan plan) throws PlanningException {
+ public LogicalPlan rewrite(OverridableConf queryContext, LogicalPlan plan) throws PlanningException {
LogicalPlan.QueryBlock rootBlock = plan.getRootBlock();
- rewriter.visit(rootBlock, plan, rootBlock, rootBlock.getRoot(), new Stack<LogicalNode>());
+ rewriter.visit(queryContext, plan, rootBlock, rootBlock.getRoot(), new Stack<LogicalNode>());
return plan;
}
@@ -120,10 +114,11 @@ public class PartitionedTableRewriter implements RewriteRule {
* @return
* @throws IOException
*/
- private Path [] findFilteredPaths(Schema partitionColumns, EvalNode [] conjunctiveForms, Path tablePath)
+ private Path [] findFilteredPaths(OverridableConf queryContext, Schema partitionColumns, EvalNode [] conjunctiveForms,
+ Path tablePath)
throws IOException {
- FileSystem fs = tablePath.getFileSystem(systemConf);
+ FileSystem fs = tablePath.getFileSystem(queryContext.getConf());
PathFilter [] filters;
if (conjunctiveForms == null) {
@@ -223,7 +218,7 @@ public class PartitionedTableRewriter implements RewriteRule {
return paths;
}
- private Path [] findFilteredPartitionPaths(ScanNode scanNode) throws IOException {
+ private Path [] findFilteredPartitionPaths(OverridableConf queryContext, ScanNode scanNode) throws IOException {
TableDesc table = scanNode.getTableDesc();
PartitionMethodDesc partitionDesc = scanNode.getTableDesc().getPartitionMethod();
@@ -262,10 +257,10 @@ public class PartitionedTableRewriter implements RewriteRule {
}
if (indexablePredicateSet.size() > 0) { // There are at least one indexable predicates
- return findFilteredPaths(paritionValuesSchema,
+ return findFilteredPaths(queryContext, paritionValuesSchema,
indexablePredicateSet.toArray(new EvalNode[indexablePredicateSet.size()]), new Path(table.getPath()));
} else { // otherwise, we will get all partition paths.
- return findFilteredPaths(paritionValuesSchema, null, new Path(table.getPath()));
+ return findFilteredPaths(queryContext, paritionValuesSchema, null, new Path(table.getPath()));
}
}
@@ -314,10 +309,11 @@ public class PartitionedTableRewriter implements RewriteRule {
}
}
- private void updateTableStat(PartitionedTableScanNode scanNode) throws PlanningException {
+ private void updateTableStat(OverridableConf queryContext, PartitionedTableScanNode scanNode)
+ throws PlanningException {
if (scanNode.getInputPaths().length > 0) {
try {
- FileSystem fs = scanNode.getInputPaths()[0].getFileSystem(systemConf);
+ FileSystem fs = scanNode.getInputPaths()[0].getFileSystem(queryContext.getConf());
long totalVolume = 0;
for (Path input : scanNode.getInputPaths()) {
@@ -396,10 +392,10 @@ public class PartitionedTableRewriter implements RewriteRule {
return sb.toString();
}
- private final class Rewriter extends BasicLogicalPlanVisitor<Object, Object> {
+ private final class Rewriter extends BasicLogicalPlanVisitor<OverridableConf, Object> {
@Override
- public Object visitScan(Object object, LogicalPlan plan, LogicalPlan.QueryBlock block, ScanNode scanNode,
- Stack<LogicalNode> stack) throws PlanningException {
+ public Object visitScan(OverridableConf queryContext, LogicalPlan plan, LogicalPlan.QueryBlock block,
+ ScanNode scanNode, Stack<LogicalNode> stack) throws PlanningException {
TableDesc table = scanNode.getTableDesc();
if (!table.hasPartition()) {
@@ -407,11 +403,11 @@ public class PartitionedTableRewriter implements RewriteRule {
}
try {
- Path [] filteredPaths = findFilteredPartitionPaths(scanNode);
+ Path [] filteredPaths = findFilteredPartitionPaths(queryContext, scanNode);
plan.addHistory("PartitionTableRewriter chooses " + filteredPaths.length + " of partitions");
PartitionedTableScanNode rewrittenScanNode = plan.createNode(PartitionedTableScanNode.class);
rewrittenScanNode.init(scanNode, filteredPaths);
- updateTableStat(rewrittenScanNode);
+ updateTableStat(queryContext, rewrittenScanNode);
// if it is topmost node, set it as the rootnode of this block.
if (stack.empty() || block.getRoot().equals(scanNode)) {
http://git-wip-us.apache.org/repos/asf/tajo/blob/32be38d4/tajo-plan/src/main/java/org/apache/tajo/plan/rewrite/rules/ProjectionPushDownRule.java
----------------------------------------------------------------------
diff --git a/tajo-plan/src/main/java/org/apache/tajo/plan/rewrite/rules/ProjectionPushDownRule.java b/tajo-plan/src/main/java/org/apache/tajo/plan/rewrite/rules/ProjectionPushDownRule.java
index f7fd90d..abd2814 100644
--- a/tajo-plan/src/main/java/org/apache/tajo/plan/rewrite/rules/ProjectionPushDownRule.java
+++ b/tajo-plan/src/main/java/org/apache/tajo/plan/rewrite/rules/ProjectionPushDownRule.java
@@ -21,6 +21,7 @@ package org.apache.tajo.plan.rewrite.rules;
import com.google.common.collect.*;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
+import org.apache.tajo.OverridableConf;
import org.apache.tajo.annotation.Nullable;
import org.apache.tajo.catalog.Column;
import org.apache.tajo.catalog.Schema;
@@ -29,7 +30,7 @@ import org.apache.tajo.plan.*;
import org.apache.tajo.plan.LogicalPlan.QueryBlock;
import org.apache.tajo.plan.expr.*;
import org.apache.tajo.plan.logical.*;
-import org.apache.tajo.plan.rewrite.RewriteRule;
+import org.apache.tajo.plan.rewrite.LogicalPlanRewriteRule;
import org.apache.tajo.plan.util.PlannerUtil;
import org.apache.tajo.catalog.SchemaUtil;
import org.apache.tajo.plan.visitor.BasicLogicalPlanVisitor;
@@ -44,7 +45,7 @@ import java.util.*;
* It also enables scanners to read only necessary columns.
*/
public class ProjectionPushDownRule extends
- BasicLogicalPlanVisitor<ProjectionPushDownRule.Context, LogicalNode> implements RewriteRule {
+ BasicLogicalPlanVisitor<ProjectionPushDownRule.Context, LogicalNode> implements LogicalPlanRewriteRule {
/** Class Logger */
private final Log LOG = LogFactory.getLog(ProjectionPushDownRule.class);
private static final String name = "ProjectionPushDown";
@@ -55,7 +56,7 @@ public class ProjectionPushDownRule extends
}
@Override
- public boolean isEligible(LogicalPlan plan) {
+ public boolean isEligible(OverridableConf queryContext, LogicalPlan plan) {
LogicalNode toBeOptimized = plan.getRootBlock().getRoot();
if (PlannerUtil.checkIfDDLPlan(toBeOptimized)) {
@@ -70,7 +71,7 @@ public class ProjectionPushDownRule extends
}
@Override
- public LogicalPlan rewrite(LogicalPlan plan) throws PlanningException {
+ public LogicalPlan rewrite(OverridableConf queryContext, LogicalPlan plan) throws PlanningException {
LogicalPlan.QueryBlock rootBlock = plan.getRootBlock();
LogicalPlan.QueryBlock topmostBlock = rootBlock;
@@ -1044,7 +1045,7 @@ public class ProjectionPushDownRule extends
if (node.hasTargets()) {
targets = node.getTargets();
} else {
- targets = PlannerUtil.schemaToTargets(node.getTableSchema());
+ targets = PlannerUtil.schemaToTargets(node.getLogicalSchema());
}
LinkedHashSet<Target> projectedTargets = Sets.newLinkedHashSet();
http://git-wip-us.apache.org/repos/asf/tajo/blob/32be38d4/tajo-plan/src/main/java/org/apache/tajo/plan/serder/EvalNodeDeserializer.java
----------------------------------------------------------------------
diff --git a/tajo-plan/src/main/java/org/apache/tajo/plan/serder/EvalNodeDeserializer.java b/tajo-plan/src/main/java/org/apache/tajo/plan/serder/EvalNodeDeserializer.java
new file mode 100644
index 0000000..322c8db
--- /dev/null
+++ b/tajo-plan/src/main/java/org/apache/tajo/plan/serder/EvalNodeDeserializer.java
@@ -0,0 +1,301 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.tajo.plan.serder;
+
+import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
+import org.apache.tajo.OverridableConf;
+import org.apache.tajo.algebra.WindowSpec.WindowFrameEndBoundType;
+import org.apache.tajo.algebra.WindowSpec.WindowFrameStartBoundType;
+import org.apache.tajo.catalog.Column;
+import org.apache.tajo.catalog.FunctionDesc;
+import org.apache.tajo.catalog.SortSpec;
+import org.apache.tajo.catalog.exception.NoSuchFunctionException;
+import org.apache.tajo.catalog.proto.CatalogProtos;
+import org.apache.tajo.datum.*;
+import org.apache.tajo.exception.InternalException;
+import org.apache.tajo.plan.expr.*;
+import org.apache.tajo.plan.function.AggFunction;
+import org.apache.tajo.plan.function.GeneralFunction;
+import org.apache.tajo.plan.logical.WindowSpec;
+import org.apache.tajo.plan.serder.PlanProto.WinFunctionEvalSpec;
+
+import java.util.*;
+
+/**
+ * It deserializes a serialized eval tree consisting of a number of EvalNodes.
+ *
+ * {@link EvalNodeSerializer} serializes an eval tree in a postfix traverse order.
+ * So, this class firstly sorts all serialized eval nodes in ascending order of their sequence IDs. Then,
+ * it sequentially restores each serialized node to EvalNode instance.
+ *
+ * @see EvalNodeSerializer
+ */
+public class EvalNodeDeserializer {
+
+ public static EvalNode deserialize(OverridableConf context, PlanProto.EvalNodeTree tree) {
+ Map<Integer, EvalNode> evalNodeMap = Maps.newHashMap();
+
+ // sort serialized eval nodes in an ascending order of their IDs.
+ List<PlanProto.EvalNode> nodeList = Lists.newArrayList(tree.getNodesList());
+ Collections.sort(nodeList, new Comparator<PlanProto.EvalNode>() {
+ @Override
+ public int compare(PlanProto.EvalNode o1, PlanProto.EvalNode o2) {
+ return o1.getId() - o2.getId();
+ }
+ });
+
+ EvalNode current = null;
+
+ // The sorted order is the same of a postfix traverse order.
+ // So, it sequentially transforms each serialized node into a EvalNode instance in a postfix order of
+ // the original eval tree.
+
+ Iterator<PlanProto.EvalNode> it = nodeList.iterator();
+ while (it.hasNext()) {
+ PlanProto.EvalNode protoNode = it.next();
+
+ EvalType type = EvalType.valueOf(protoNode.getType().name());
+
+ if (EvalType.isUnaryOperator(type)) {
+ PlanProto.UnaryEval unaryProto = protoNode.getUnary();
+ EvalNode child = evalNodeMap.get(unaryProto.getChildId());
+
+ switch (type) {
+ case NOT:
+ current = new NotEval(child);
+ break;
+ case IS_NULL:
+ current = new IsNullEval(unaryProto.getNegative(), child);
+ break;
+ case CAST:
+ current = new CastEval(context, child, unaryProto.getCastingType());
+ break;
+ case SIGNED:
+ current = new SignedEval(unaryProto.getNegative(), child);
+ break;
+ default:
+ throw new RuntimeException("Unknown EvalType: " + type.name());
+ }
+
+ } else if (EvalType.isBinaryOperator(type)) {
+ PlanProto.BinaryEval binProto = protoNode.getBinary();
+ EvalNode lhs = evalNodeMap.get(binProto.getLhsId());
+ EvalNode rhs = evalNodeMap.get(binProto.getRhsId());
+
+ switch (type) {
+ case IN:
+ current = new InEval(lhs, (RowConstantEval) rhs, binProto.getNegative());
+ break;
+ case LIKE: {
+ PlanProto.PatternMatchEvalSpec patternMatchProto = protoNode.getPatternMatch();
+ current = new LikePredicateEval(binProto.getNegative(), lhs, (ConstEval) rhs,
+ patternMatchProto.getCaseSensitive());
+ break;
+ }
+ case REGEX: {
+ PlanProto.PatternMatchEvalSpec patternMatchProto = protoNode.getPatternMatch();
+ current = new RegexPredicateEval(binProto.getNegative(), lhs, (ConstEval) rhs,
+ patternMatchProto.getCaseSensitive());
+ break;
+ }
+ case SIMILAR_TO: {
+ PlanProto.PatternMatchEvalSpec patternMatchProto = protoNode.getPatternMatch();
+ current = new SimilarToPredicateEval(binProto.getNegative(), lhs, (ConstEval) rhs,
+ patternMatchProto.getCaseSensitive());
+ break;
+ }
+
+ default:
+ current = new BinaryEval(type, lhs, rhs);
+ }
+
+ } else if (type == EvalType.CONST) {
+ PlanProto.ConstEval constProto = protoNode.getConst();
+ current = new ConstEval(deserialize(constProto.getValue()));
+
+ } else if (type == EvalType.ROW_CONSTANT) {
+ PlanProto.RowConstEval rowConstProto = protoNode.getRowConst();
+ Datum[] values = new Datum[rowConstProto.getValuesCount()];
+ for (int i = 0; i < rowConstProto.getValuesCount(); i++) {
+ values[i] = deserialize(rowConstProto.getValues(i));
+ }
+ current = new RowConstantEval(values);
+
+ } else if (type == EvalType.FIELD) {
+ CatalogProtos.ColumnProto columnProto = protoNode.getField();
+ current = new FieldEval(new Column(columnProto));
+
+ } else if (type == EvalType.BETWEEN) {
+ PlanProto.BetweenEval betweenProto = protoNode.getBetween();
+ current = new BetweenPredicateEval(betweenProto.getNegative(), betweenProto.getSymmetric(),
+ evalNodeMap.get(betweenProto.getPredicand()),
+ evalNodeMap.get(betweenProto.getBegin()),
+ evalNodeMap.get(betweenProto.getEnd()));
+
+ } else if (type == EvalType.CASE) {
+ PlanProto.CaseWhenEval caseWhenProto = protoNode.getCasewhen();
+ CaseWhenEval caseWhenEval = new CaseWhenEval();
+ for (int i = 0; i < caseWhenProto.getIfCondsCount(); i++) {
+ caseWhenEval.addIfCond((CaseWhenEval.IfThenEval) evalNodeMap.get(caseWhenProto.getIfConds(i)));
+ }
+ if (caseWhenProto.hasElse()) {
+ caseWhenEval.setElseResult(evalNodeMap.get(caseWhenProto.getElse()));
+ }
+ current = caseWhenEval;
+
+ } else if (type == EvalType.IF_THEN) {
+ PlanProto.IfCondEval ifCondProto = protoNode.getIfCond();
+ current = new CaseWhenEval.IfThenEval(evalNodeMap.get(ifCondProto.getCondition()),
+ evalNodeMap.get(ifCondProto.getThen()));
+
+ } else if (EvalType.isFunction(type)) {
+ PlanProto.FunctionEval funcProto = protoNode.getFunction();
+
+ EvalNode [] params = new EvalNode[funcProto.getParamIdsCount()];
+ for (int i = 0; i < funcProto.getParamIdsCount(); i++) {
+ params[i] = evalNodeMap.get(funcProto.getParamIds(i));
+ }
+
+ FunctionDesc funcDesc = null;
+ try {
+ funcDesc = new FunctionDesc(funcProto.getFuncion());
+ if (type == EvalType.FUNCTION) {
+ GeneralFunction instance = (GeneralFunction) funcDesc.newInstance();
+ current = new GeneralFunctionEval(context, new FunctionDesc(funcProto.getFuncion()), instance, params);
+
+ } else if (type == EvalType.AGG_FUNCTION || type == EvalType.WINDOW_FUNCTION) {
+ AggFunction instance = (AggFunction) funcDesc.newInstance();
+ if (type == EvalType.AGG_FUNCTION) {
+ AggregationFunctionCallEval aggFunc =
+ new AggregationFunctionCallEval(new FunctionDesc(funcProto.getFuncion()), instance, params);
+
+ PlanProto.AggFunctionEvalSpec aggFunctionProto = protoNode.getAggFunction();
+ aggFunc.setIntermediatePhase(aggFunctionProto.getIntermediatePhase());
+ aggFunc.setFinalPhase(aggFunctionProto.getFinalPhase());
+ if (aggFunctionProto.hasAlias()) {
+ aggFunc.setAlias(aggFunctionProto.getAlias());
+ }
+ current = aggFunc;
+
+ } else {
+ WinFunctionEvalSpec windowFuncProto = protoNode.getWinFunction();
+
+ WindowFunctionEval winFunc =
+ new WindowFunctionEval(new FunctionDesc(funcProto.getFuncion()), instance, params,
+ convertWindowFrame(windowFuncProto.getWindowFrame()));
+
+ if (windowFuncProto.getSortSpecCount() > 0) {
+ SortSpec[] sortSpecs = LogicalNodeDeserializer.convertSortSpecs(windowFuncProto.getSortSpecList());
+ winFunc.setSortSpecs(sortSpecs);
+ }
+
+ current = winFunc;
+ }
+ }
+ } catch (ClassNotFoundException cnfe) {
+ throw new NoSuchFunctionException(funcDesc.getFunctionName(), funcDesc.getParamTypes());
+ } catch (InternalException ie) {
+ throw new NoSuchFunctionException(funcDesc.getFunctionName(), funcDesc.getParamTypes());
+ }
+ } else {
+ throw new RuntimeException("Unknown EvalType: " + type.name());
+ }
+
+ evalNodeMap.put(protoNode.getId(), current);
+ }
+
+ return current;
+ }
+
+ private static WindowSpec.WindowFrame convertWindowFrame(WinFunctionEvalSpec.WindowFrame windowFrame) {
+ WindowFrameStartBoundType startBoundType = convertWindowStartBound(windowFrame.getStartBound().getBoundType());
+ WindowSpec.WindowStartBound startBound = new WindowSpec.WindowStartBound(startBoundType);
+
+ WindowFrameEndBoundType endBoundType = convertWindowEndBound(windowFrame.getEndBound().getBoundType());
+ WindowSpec.WindowEndBound endBound = new WindowSpec.WindowEndBound(endBoundType);
+
+ WindowSpec.WindowFrame frame = new WindowSpec.WindowFrame(startBound, endBound);
+ return frame;
+ }
+
+ private static WindowFrameStartBoundType convertWindowStartBound(
+ WinFunctionEvalSpec.WindowFrameStartBoundType type) {
+ if (type == WinFunctionEvalSpec.WindowFrameStartBoundType.S_UNBOUNDED_PRECEDING) {
+ return WindowFrameStartBoundType.UNBOUNDED_PRECEDING;
+ } else if (type == WinFunctionEvalSpec.WindowFrameStartBoundType.S_CURRENT_ROW) {
+ return WindowFrameStartBoundType.CURRENT_ROW;
+ } else if (type == WinFunctionEvalSpec.WindowFrameStartBoundType.S_PRECEDING) {
+ return WindowFrameStartBoundType.PRECEDING;
+ } else {
+ throw new IllegalStateException("Unknown Window Start Bound type: " + type.name());
+ }
+ }
+
+ private static WindowFrameEndBoundType convertWindowEndBound(
+ WinFunctionEvalSpec.WindowFrameEndBoundType type) {
+ if (type == WinFunctionEvalSpec.WindowFrameEndBoundType.E_UNBOUNDED_FOLLOWING) {
+ return WindowFrameEndBoundType.UNBOUNDED_FOLLOWING;
+ } else if (type == WinFunctionEvalSpec.WindowFrameEndBoundType.E_CURRENT_ROW) {
+ return WindowFrameEndBoundType.CURRENT_ROW;
+ } else if (type == WinFunctionEvalSpec.WindowFrameEndBoundType.E_FOLLOWING) {
+ return WindowFrameEndBoundType.FOLLOWING;
+ } else {
+ throw new IllegalStateException("Unknown Window End Bound type: " + type.name());
+ }
+ }
+
+ public static Datum deserialize(PlanProto.Datum datum) {
+ switch (datum.getType()) {
+ case BOOLEAN:
+ return DatumFactory.createBool(datum.getBoolean());
+ case CHAR:
+ return DatumFactory.createChar(datum.getText());
+ case INT1:
+ case INT2:
+ return DatumFactory.createInt2((short) datum.getInt4());
+ case INT4:
+ return DatumFactory.createInt4(datum.getInt4());
+ case INT8:
+ return DatumFactory.createInt8(datum.getInt8());
+ case FLOAT4:
+ return DatumFactory.createFloat4(datum.getFloat4());
+ case FLOAT8:
+ return DatumFactory.createFloat8(datum.getFloat8());
+ case VARCHAR:
+ case TEXT:
+ return DatumFactory.createText(datum.getText());
+ case TIMESTAMP:
+ return new TimestampDatum(datum.getInt8());
+ case DATE:
+ return DatumFactory.createDate(datum.getInt4());
+ case TIME:
+ return DatumFactory.createTime(datum.getInt8());
+ case BINARY:
+ case BLOB:
+ return DatumFactory.createBlob(datum.getBlob().toByteArray());
+ case INTERVAL:
+ return new IntervalDatum(datum.getInterval().getMonth(), datum.getInterval().getMsec());
+ case NULL_TYPE:
+ return NullDatum.get();
+ default:
+ throw new RuntimeException("Unknown data type: " + datum.getType().name());
+ }
+ }
+}
http://git-wip-us.apache.org/repos/asf/tajo/blob/32be38d4/tajo-plan/src/main/java/org/apache/tajo/plan/serder/EvalNodeSerializer.java
----------------------------------------------------------------------
diff --git a/tajo-plan/src/main/java/org/apache/tajo/plan/serder/EvalNodeSerializer.java b/tajo-plan/src/main/java/org/apache/tajo/plan/serder/EvalNodeSerializer.java
new file mode 100644
index 0000000..c7702c5
--- /dev/null
+++ b/tajo-plan/src/main/java/org/apache/tajo/plan/serder/EvalNodeSerializer.java
@@ -0,0 +1,397 @@
+/*
+ * Lisensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.tajo.plan.serder;
+
+import com.google.common.base.Preconditions;
+import com.google.common.collect.Maps;
+import com.google.protobuf.ByteString;
+import org.apache.tajo.algebra.WindowSpec.WindowFrameEndBoundType;
+import org.apache.tajo.algebra.WindowSpec.WindowFrameStartBoundType;
+import org.apache.tajo.catalog.proto.CatalogProtos;
+import org.apache.tajo.datum.Datum;
+import org.apache.tajo.datum.IntervalDatum;
+import org.apache.tajo.plan.expr.*;
+import org.apache.tajo.plan.logical.WindowSpec;
+import org.apache.tajo.plan.serder.PlanProto.WinFunctionEvalSpec;
+import org.apache.tajo.plan.serder.PlanProto.WinFunctionEvalSpec.WindowFrame;
+import org.apache.tajo.util.ProtoUtil;
+
+import java.util.Map;
+import java.util.Stack;
+
+/**
+ * It traverses an eval tree consisting of a number of {@link org.apache.tajo.plan.expr.EvalNode}
+ * in a postfix traverse order. The postfix traverse order guarantees that all child nodes of some node N
+ * were already visited when the node N is visited. This manner makes tree serialization possible in a simple logic.
+ */
+public class EvalNodeSerializer
+ extends SimpleEvalNodeVisitor<EvalNodeSerializer.EvalTreeProtoBuilderContext> {
+
+ private static final EvalNodeSerializer instance;
+
+ static {
+ instance = new EvalNodeSerializer();
+ }
+
+ public static class EvalTreeProtoBuilderContext {
+ private int seqId = 0;
+ private Map<EvalNode, Integer> idMap = Maps.newHashMap();
+ private PlanProto.EvalNodeTree.Builder treeBuilder = PlanProto.EvalNodeTree.newBuilder();
+ }
+
+ public static PlanProto.EvalNodeTree serialize(EvalNode evalNode) {
+ EvalNodeSerializer.EvalTreeProtoBuilderContext context =
+ new EvalNodeSerializer.EvalTreeProtoBuilderContext();
+ instance.visit(context, evalNode, new Stack<EvalNode>());
+ return context.treeBuilder.build();
+ }
+
+ /**
+ * Return child's serialization IDs. Usually, 0 is used for a child id of unary node or left child of
+ * binary node. 1 is used for right child of binary node. Between will use 0 as predicand, 1 as begin, and 2 as
+ * end eval node. For more detail, you should refer to each EvalNode implementation.
+ *
+ * @param context Context
+ * @param evalNode EvalNode
+ * @return The array of IDs which points to stored EvalNode.
+ * @see org.apache.tajo.plan.expr.EvalNode
+ */
+ private int [] registerGetChildIds(EvalTreeProtoBuilderContext context, EvalNode evalNode) {
+ int [] childIds = new int[evalNode.childNum()];
+ for (int i = 0; i < evalNode.childNum(); i++) {
+ if (context.idMap.containsKey(evalNode.getChild(i))) {
+ childIds[i] = context.idMap.get(evalNode.getChild(i));
+ } else {
+ childIds[i] = context.seqId++;
+ }
+ }
+ return childIds;
+ }
+
+ private PlanProto.EvalNode.Builder createEvalBuilder(EvalTreeProtoBuilderContext context, EvalNode node) {
+ int sid; // serialization sequence id
+ if (context.idMap.containsKey(node)) {
+ sid = context.idMap.get(node);
+ } else {
+ sid = context.seqId++;
+ context.idMap.put(node, sid);
+ }
+
+ PlanProto.EvalNode.Builder nodeBuilder = PlanProto.EvalNode.newBuilder();
+ nodeBuilder.setId(sid);
+ nodeBuilder.setDataType(node.getValueType());
+ nodeBuilder.setType(PlanProto.EvalType.valueOf(node.getType().name()));
+ return nodeBuilder;
+ }
+
+ @Override
+ public EvalNode visitUnaryEval(EvalTreeProtoBuilderContext context, Stack<EvalNode> stack, UnaryEval unary) {
+ // visiting and registering childs
+ super.visitUnaryEval(context, stack, unary);
+ int [] childIds = registerGetChildIds(context, unary);
+
+ // building itself
+ PlanProto.UnaryEval.Builder unaryBuilder = PlanProto.UnaryEval.newBuilder();
+ unaryBuilder.setChildId(childIds[0]);
+ if (unary.getType() == EvalType.IS_NULL) {
+ IsNullEval isNullEval = (IsNullEval) unary;
+ unaryBuilder.setNegative(isNullEval.isNot());
+ } else if (unary.getType() == EvalType.SIGNED) {
+ SignedEval signedEval = (SignedEval) unary;
+ unaryBuilder.setNegative(signedEval.isNegative());
+ } else if (unary.getType() == EvalType.CAST) {
+ CastEval castEval = (CastEval) unary;
+ unaryBuilder.setCastingType(castEval.getValueType());
+ if (castEval.hasTimeZone()) {
+ unaryBuilder.setTimezone(castEval.getTimezone().getID());
+ }
+ }
+
+ // registering itself and building EvalNode
+ PlanProto.EvalNode.Builder builder = createEvalBuilder(context, unary);
+ builder.setUnary(unaryBuilder);
+ context.treeBuilder.addNodes(builder);
+ return unary;
+ }
+
+ @Override
+ public EvalNode visitBinaryEval(EvalTreeProtoBuilderContext context, Stack<EvalNode> stack, BinaryEval binary) {
+ // visiting and registering childs
+ super.visitBinaryEval(context, stack, binary);
+ int [] childIds = registerGetChildIds(context, binary);
+
+ // registering itself and building EvalNode
+ PlanProto.EvalNode.Builder builder = createEvalBuilder(context, binary);
+
+ // building itself
+ PlanProto.BinaryEval.Builder binaryBuilder = PlanProto.BinaryEval.newBuilder();
+ binaryBuilder.setLhsId(childIds[0]);
+ binaryBuilder.setRhsId(childIds[1]);
+
+ if (binary instanceof InEval) {
+ binaryBuilder.setNegative(((InEval)binary).isNot());
+ } else if (binary instanceof PatternMatchPredicateEval) {
+ PatternMatchPredicateEval patternMatch = (PatternMatchPredicateEval) binary;
+ binaryBuilder.setNegative(patternMatch.isNot());
+ builder.setPatternMatch(
+ PlanProto.PatternMatchEvalSpec.newBuilder().setCaseSensitive(patternMatch.isCaseInsensitive()));
+ }
+
+ builder.setBinary(binaryBuilder);
+ context.treeBuilder.addNodes(builder);
+ return binary;
+ }
+
+ @Override
+ public EvalNode visitConst(EvalTreeProtoBuilderContext context, ConstEval constant, Stack<EvalNode> stack) {
+ PlanProto.EvalNode.Builder builder = createEvalBuilder(context, constant);
+ builder.setConst(PlanProto.ConstEval.newBuilder().setValue(serialize(constant.getValue())));
+ context.treeBuilder.addNodes(builder);
+ return constant;
+ }
+
+ @Override
+ public EvalNode visitRowConstant(EvalTreeProtoBuilderContext context, RowConstantEval rowConst,
+ Stack<EvalNode> stack) {
+
+ PlanProto.RowConstEval.Builder rowConstBuilder = PlanProto.RowConstEval.newBuilder();
+ for (Datum d : rowConst.getValues()) {
+ rowConstBuilder.addValues(serialize(d));
+ }
+
+ PlanProto.EvalNode.Builder builder = createEvalBuilder(context, rowConst);
+ builder.setRowConst(rowConstBuilder);
+ context.treeBuilder.addNodes(builder);
+ return rowConst;
+ }
+
+ public EvalNode visitField(EvalTreeProtoBuilderContext context, Stack<EvalNode> stack, FieldEval field) {
+ PlanProto.EvalNode.Builder builder = createEvalBuilder(context, field);
+ builder.setField(field.getColumnRef().getProto());
+ context.treeBuilder.addNodes(builder);
+ return field;
+ }
+
+ public EvalNode visitBetween(EvalTreeProtoBuilderContext context, BetweenPredicateEval between,
+ Stack<EvalNode> stack) {
+ // visiting and registering childs
+ super.visitBetween(context, between, stack);
+ int [] childIds = registerGetChildIds(context, between);
+ Preconditions.checkState(childIds.length == 3, "Between must have three childs, but there are " + childIds.length
+ + " child nodes");
+
+ // building itself
+ PlanProto.BetweenEval.Builder betweenBuilder = PlanProto.BetweenEval.newBuilder();
+ betweenBuilder.setNegative(between.isNot());
+ betweenBuilder.setSymmetric(between.isSymmetric());
+ betweenBuilder.setPredicand(childIds[0]);
+ betweenBuilder.setBegin(childIds[1]);
+ betweenBuilder.setEnd(childIds[2]);
+
+ // registering itself and building EvalNode
+ PlanProto.EvalNode.Builder builder = createEvalBuilder(context, between);
+ builder.setBetween(betweenBuilder);
+ context.treeBuilder.addNodes(builder);
+ return between;
+ }
+
+ public EvalNode visitCaseWhen(EvalTreeProtoBuilderContext context, CaseWhenEval caseWhen, Stack<EvalNode> stack) {
+ // visiting and registering childs
+ super.visitCaseWhen(context, caseWhen, stack);
+ int [] childIds = registerGetChildIds(context, caseWhen);
+ Preconditions.checkState(childIds.length > 0, "Case When must have at least one child, but there is no child");
+
+ // building itself
+ PlanProto.CaseWhenEval.Builder caseWhenBuilder = PlanProto.CaseWhenEval.newBuilder();
+ int ifCondsNum = childIds.length - (caseWhen.hasElse() ? 1 : 0);
+ for (int i = 0; i < ifCondsNum; i++) {
+ caseWhenBuilder.addIfConds(childIds[i]);
+ }
+ if (caseWhen.hasElse()) {
+ caseWhenBuilder.setElse(childIds[childIds.length - 1]);
+ }
+
+ // registering itself and building EvalNode
+ PlanProto.EvalNode.Builder builder = createEvalBuilder(context, caseWhen);
+ builder.setCasewhen(caseWhenBuilder);
+ context.treeBuilder.addNodes(builder);
+
+ return caseWhen;
+ }
+
+ public EvalNode visitIfThen(EvalTreeProtoBuilderContext context, CaseWhenEval.IfThenEval ifCond,
+ Stack<EvalNode> stack) {
+ // visiting and registering childs
+ super.visitIfThen(context, ifCond, stack);
+ int [] childIds = registerGetChildIds(context, ifCond);
+
+ // building itself
+ PlanProto.IfCondEval.Builder ifCondBuilder = PlanProto.IfCondEval.newBuilder();
+ ifCondBuilder.setCondition(childIds[0]);
+ ifCondBuilder.setThen(childIds[1]);
+
+ // registering itself and building EvalNode
+ PlanProto.EvalNode.Builder builder = createEvalBuilder(context, ifCond);
+ builder.setIfCond(ifCondBuilder);
+ context.treeBuilder.addNodes(builder);
+
+ return ifCond;
+ }
+
+ public EvalNode visitFuncCall(EvalTreeProtoBuilderContext context, FunctionEval function, Stack<EvalNode> stack) {
+ // visiting and registering childs
+ super.visitFuncCall(context, function, stack);
+ int [] childIds = registerGetChildIds(context, function);
+
+ // building itself
+ PlanProto.FunctionEval.Builder funcBuilder = PlanProto.FunctionEval.newBuilder();
+ funcBuilder.setFuncion(function.getFuncDesc().getProto());
+ for (int i = 0; i < childIds.length; i++) {
+ funcBuilder.addParamIds(childIds[i]);
+ }
+
+ // registering itself and building EvalNode
+ PlanProto.EvalNode.Builder builder = createEvalBuilder(context, function);
+ builder.setFunction(funcBuilder);
+
+ if (function instanceof AggregationFunctionCallEval) {
+ AggregationFunctionCallEval aggFunc = (AggregationFunctionCallEval) function;
+
+ PlanProto.AggFunctionEvalSpec.Builder aggFunctionEvalBuilder = PlanProto.AggFunctionEvalSpec.newBuilder();
+ aggFunctionEvalBuilder.setIntermediatePhase(aggFunc.isIntermediatePhase());
+ aggFunctionEvalBuilder.setFinalPhase(aggFunc.isFinalPhase());
+ if (aggFunc.hasAlias()) {
+ aggFunctionEvalBuilder.setAlias(aggFunc.getAlias());
+ }
+
+ builder.setAggFunction(aggFunctionEvalBuilder);
+ }
+
+
+ if (function instanceof WindowFunctionEval) {
+ WindowFunctionEval winFunc = (WindowFunctionEval) function;
+ WinFunctionEvalSpec.Builder windowFuncBuilder = WinFunctionEvalSpec.newBuilder();
+
+ if (winFunc.hasSortSpecs()) {
+ windowFuncBuilder.addAllSortSpec(ProtoUtil.<CatalogProtos.SortSpecProto>toProtoObjects
+ (winFunc.getSortSpecs()));
+ }
+
+ windowFuncBuilder.setWindowFrame(buildWindowFrame(winFunc.getWindowFrame()));
+ builder.setWinFunction(windowFuncBuilder);
+ }
+
+
+ context.treeBuilder.addNodes(builder);
+ return function;
+ }
+
+ private WindowFrame buildWindowFrame(WindowSpec.WindowFrame frame) {
+ WindowFrame.Builder windowFrameBuilder = WindowFrame.newBuilder();
+
+ WindowSpec.WindowStartBound startBound = frame.getStartBound();
+ WindowSpec.WindowEndBound endBound = frame.getEndBound();
+
+ WinFunctionEvalSpec.WindowStartBound.Builder startBoundBuilder = WinFunctionEvalSpec.WindowStartBound.newBuilder();
+ startBoundBuilder.setBoundType(convertStartBoundType(startBound.getBoundType()));
+
+ WinFunctionEvalSpec.WindowEndBound.Builder endBoundBuilder = WinFunctionEvalSpec.WindowEndBound.newBuilder();
+ endBoundBuilder.setBoundType(convertEndBoundType(endBound.getBoundType()));
+
+ windowFrameBuilder.setStartBound(startBoundBuilder);
+ windowFrameBuilder.setEndBound(endBoundBuilder);
+
+ return windowFrameBuilder.build();
+ }
+
+ private WinFunctionEvalSpec.WindowFrameStartBoundType convertStartBoundType(WindowFrameStartBoundType type) {
+ if (type == WindowFrameStartBoundType.UNBOUNDED_PRECEDING) {
+ return WinFunctionEvalSpec.WindowFrameStartBoundType.S_UNBOUNDED_PRECEDING;
+ } else if (type == WindowFrameStartBoundType.CURRENT_ROW) {
+ return WinFunctionEvalSpec.WindowFrameStartBoundType.S_CURRENT_ROW;
+ } else if (type == WindowFrameStartBoundType.PRECEDING) {
+ return WinFunctionEvalSpec.WindowFrameStartBoundType.S_PRECEDING;
+ } else {
+ throw new IllegalStateException("Unknown Window Start Bound type: " + type.name());
+ }
+ }
+
+ private WinFunctionEvalSpec.WindowFrameEndBoundType convertEndBoundType(WindowFrameEndBoundType type) {
+ if (type == WindowFrameEndBoundType.UNBOUNDED_FOLLOWING) {
+ return WinFunctionEvalSpec.WindowFrameEndBoundType.E_UNBOUNDED_FOLLOWING;
+ } else if (type == WindowFrameEndBoundType.CURRENT_ROW) {
+ return WinFunctionEvalSpec.WindowFrameEndBoundType.E_CURRENT_ROW;
+ } else if (type == WindowFrameEndBoundType.FOLLOWING) {
+ return WinFunctionEvalSpec.WindowFrameEndBoundType.E_FOLLOWING;
+ } else {
+ throw new IllegalStateException("Unknown Window End Bound type: " + type.name());
+ }
+ }
+
+ public static PlanProto.Datum serialize(Datum datum) {
+ PlanProto.Datum.Builder builder = PlanProto.Datum.newBuilder();
+
+ builder.setType(datum.type());
+
+ switch (datum.type()) {
+ case NULL_TYPE:
+ break;
+ case BOOLEAN:
+ builder.setBoolean(datum.asBool());
+ break;
+ case INT1:
+ case INT2:
+ case INT4:
+ case DATE:
+ builder.setInt4(datum.asInt4());
+ break;
+ case INT8:
+ case TIMESTAMP:
+ case TIME:
+ builder.setInt8(datum.asInt8());
+ break;
+ case FLOAT4:
+ builder.setFloat4(datum.asFloat4());
+ break;
+ case FLOAT8:
+ builder.setFloat8(datum.asFloat8());
+ break;
+ case CHAR:
+ case VARCHAR:
+ case TEXT:
+ builder.setText(datum.asChars());
+ break;
+ case BINARY:
+ case BLOB:
+ builder.setBlob(ByteString.copyFrom(datum.asByteArray()));
+ break;
+ case INTERVAL:
+ IntervalDatum interval = (IntervalDatum) datum;
+ PlanProto.Interval.Builder intervalBuilder = PlanProto.Interval.newBuilder();
+ intervalBuilder.setMonth(interval.getMonths());
+ intervalBuilder.setMsec(interval.getMilliSeconds());
+ builder.setInterval(intervalBuilder);
+ break;
+ default:
+ throw new RuntimeException("Unknown data type: " + datum.type().name());
+ }
+
+ return builder.build();
+ }
+}
http://git-wip-us.apache.org/repos/asf/tajo/blob/32be38d4/tajo-plan/src/main/java/org/apache/tajo/plan/serder/EvalTreeProtoDeserializer.java
----------------------------------------------------------------------
diff --git a/tajo-plan/src/main/java/org/apache/tajo/plan/serder/EvalTreeProtoDeserializer.java b/tajo-plan/src/main/java/org/apache/tajo/plan/serder/EvalTreeProtoDeserializer.java
deleted file mode 100644
index 89b4fc0..0000000
--- a/tajo-plan/src/main/java/org/apache/tajo/plan/serder/EvalTreeProtoDeserializer.java
+++ /dev/null
@@ -1,218 +0,0 @@
-/*
- * Lisensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.tajo.plan.serder;
-
-import com.google.common.collect.Lists;
-import com.google.common.collect.Maps;
-import org.apache.tajo.OverridableConf;
-import org.apache.tajo.catalog.Column;
-import org.apache.tajo.catalog.FunctionDesc;
-import org.apache.tajo.catalog.exception.NoSuchFunctionException;
-import org.apache.tajo.catalog.proto.CatalogProtos;
-import org.apache.tajo.datum.*;
-import org.apache.tajo.exception.InternalException;
-import org.apache.tajo.plan.expr.*;
-import org.apache.tajo.plan.function.AggFunction;
-import org.apache.tajo.plan.function.GeneralFunction;
-
-import java.util.*;
-
-/**
- * It deserializes a serialized eval tree consisting of a number of EvalNodes.
- *
- * {@link EvalTreeProtoSerializer} serializes an eval tree in a postfix traverse order.
- * So, this class firstly sorts all serialized eval nodes in ascending order of their sequence IDs. Then,
- * it sequentially restores each serialized node to EvalNode instance.
- *
- * @see EvalTreeProtoSerializer
- */
-public class EvalTreeProtoDeserializer {
-
- public static EvalNode deserialize(OverridableConf context, PlanProto.EvalTree tree) {
- Map<Integer, EvalNode> evalNodeMap = Maps.newHashMap();
-
- // sort serialized eval nodes in an ascending order of their IDs.
- List<PlanProto.EvalNode> nodeList = Lists.newArrayList(tree.getNodesList());
- Collections.sort(nodeList, new Comparator<PlanProto.EvalNode>() {
- @Override
- public int compare(PlanProto.EvalNode o1, PlanProto.EvalNode o2) {
- return o1.getId() - o2.getId();
- }
- });
-
- EvalNode current = null;
-
- // The sorted order is the same of a postfix traverse order.
- // So, it sequentially transforms each serialized node into a EvalNode instance in a postfix order of
- // the original eval tree.
-
- Iterator<PlanProto.EvalNode> it = nodeList.iterator();
- while (it.hasNext()) {
- PlanProto.EvalNode protoNode = it.next();
-
- EvalType type = EvalType.valueOf(protoNode.getType().name());
-
- if (EvalType.isUnaryOperator(type)) {
- PlanProto.UnaryEval unaryProto = protoNode.getUnary();
- EvalNode child = evalNodeMap.get(unaryProto.getChildId());
-
- switch (type) {
- case NOT:
- current = new NotEval(child);
- break;
- case IS_NULL:
- current = new IsNullEval(unaryProto.getNegative(), child);
- break;
- case CAST:
- current = new CastEval(context, child, unaryProto.getCastingType());
- break;
- case SIGNED:
- current = new SignedEval(unaryProto.getNegative(), child);
- break;
- default:
- throw new RuntimeException("Unknown EvalType: " + type.name());
- }
-
- } else if (EvalType.isBinaryOperator(type)) {
- PlanProto.BinaryEval binProto = protoNode.getBinary();
- EvalNode lhs = evalNodeMap.get(binProto.getLhsId());
- EvalNode rhs = evalNodeMap.get(binProto.getRhsId());
-
- switch (type) {
- case IN:
- current = new InEval(lhs, (RowConstantEval) rhs, binProto.getNegative());
- break;
- default:
- current = new BinaryEval(type, lhs, rhs);
- }
-
- } else if (type == EvalType.CONST) {
- PlanProto.ConstEval constProto = protoNode.getConst();
- current = new ConstEval(deserialize(constProto.getValue()));
-
- } else if (type == EvalType.ROW_CONSTANT) {
- PlanProto.RowConstEval rowConstProto = protoNode.getRowConst();
- Datum[] values = new Datum[rowConstProto.getValuesCount()];
- for (int i = 0; i < rowConstProto.getValuesCount(); i++) {
- values[i] = deserialize(rowConstProto.getValues(i));
- }
- current = new RowConstantEval(values);
-
- } else if (type == EvalType.FIELD) {
- CatalogProtos.ColumnProto columnProto = protoNode.getField();
- current = new FieldEval(new Column(columnProto));
-
- } else if (type == EvalType.BETWEEN) {
- PlanProto.BetweenEval betweenProto = protoNode.getBetween();
- current = new BetweenPredicateEval(betweenProto.getNegative(), betweenProto.getSymmetric(),
- evalNodeMap.get(betweenProto.getPredicand()),
- evalNodeMap.get(betweenProto.getBegin()),
- evalNodeMap.get(betweenProto.getEnd()));
-
- } else if (type == EvalType.CASE) {
- PlanProto.CaseWhenEval caseWhenProto = protoNode.getCasewhen();
- CaseWhenEval caseWhenEval = new CaseWhenEval();
- for (int i = 0; i < caseWhenProto.getIfCondsCount(); i++) {
- caseWhenEval.addIfCond((CaseWhenEval.IfThenEval) evalNodeMap.get(caseWhenProto.getIfConds(i)));
- }
- if (caseWhenProto.hasElse()) {
- caseWhenEval.setElseResult(evalNodeMap.get(caseWhenProto.getElse()));
- }
- current = caseWhenEval;
-
- } else if (type == EvalType.IF_THEN) {
- PlanProto.IfCondEval ifCondProto = protoNode.getIfCond();
- current = new CaseWhenEval.IfThenEval(evalNodeMap.get(ifCondProto.getCondition()),
- evalNodeMap.get(ifCondProto.getThen()));
-
- } else if (EvalType.isFunction(type)) {
- PlanProto.FunctionEval funcProto = protoNode.getFunction();
-
- EvalNode [] params = new EvalNode[funcProto.getParamIdsCount()];
- for (int i = 0; i < funcProto.getParamIdsCount(); i++) {
- params[i] = evalNodeMap.get(funcProto.getParamIds(i));
- }
-
- FunctionDesc funcDesc = null;
- try {
- funcDesc = new FunctionDesc(funcProto.getFuncion());
- if (type == EvalType.FUNCTION) {
- GeneralFunction instance = (GeneralFunction) funcDesc.newInstance();
- current = new GeneralFunctionEval(context, new FunctionDesc(funcProto.getFuncion()), instance, params);
- } else if (type == EvalType.AGG_FUNCTION || type == EvalType.WINDOW_FUNCTION) {
- AggFunction instance = (AggFunction) funcDesc.newInstance();
- if (type == EvalType.AGG_FUNCTION) {
- current = new AggregationFunctionCallEval(new FunctionDesc(funcProto.getFuncion()), instance, params);
- } else {
- current = new WindowFunctionEval(new FunctionDesc(funcProto.getFuncion()), instance, params, null);
- }
- }
- } catch (ClassNotFoundException cnfe) {
- throw new NoSuchFunctionException(funcDesc.getFunctionName(), funcDesc.getParamTypes());
- } catch (InternalException ie) {
- throw new NoSuchFunctionException(funcDesc.getFunctionName(), funcDesc.getParamTypes());
- }
- } else {
- throw new RuntimeException("Unknown EvalType: " + type.name());
- }
-
- evalNodeMap.put(protoNode.getId(), current);
- }
-
- return current;
- }
-
- public static Datum deserialize(PlanProto.Datum datum) {
- switch (datum.getType()) {
- case BOOLEAN:
- return DatumFactory.createBool(datum.getBoolean());
- case CHAR:
- return DatumFactory.createChar(datum.getText());
- case INT1:
- case INT2:
- return DatumFactory.createInt2((short) datum.getInt4());
- case INT4:
- return DatumFactory.createInt4(datum.getInt4());
- case INT8:
- return DatumFactory.createInt8(datum.getInt8());
- case FLOAT4:
- return DatumFactory.createFloat4(datum.getFloat4());
- case FLOAT8:
- return DatumFactory.createFloat8(datum.getFloat8());
- case VARCHAR:
- case TEXT:
- return DatumFactory.createText(datum.getText());
- case TIMESTAMP:
- return new TimestampDatum(datum.getInt8());
- case DATE:
- return DatumFactory.createDate(datum.getInt4());
- case TIME:
- return DatumFactory.createTime(datum.getInt8());
- case BINARY:
- case BLOB:
- return DatumFactory.createBlob(datum.getBlob().toByteArray());
- case INTERVAL:
- return new IntervalDatum(datum.getInterval().getMonth(), datum.getInterval().getMsec());
- case NULL_TYPE:
- return NullDatum.get();
- default:
- throw new RuntimeException("Unknown data type: " + datum.getType().name());
- }
- }
-}
http://git-wip-us.apache.org/repos/asf/tajo/blob/32be38d4/tajo-plan/src/main/java/org/apache/tajo/plan/serder/EvalTreeProtoSerializer.java
----------------------------------------------------------------------
diff --git a/tajo-plan/src/main/java/org/apache/tajo/plan/serder/EvalTreeProtoSerializer.java b/tajo-plan/src/main/java/org/apache/tajo/plan/serder/EvalTreeProtoSerializer.java
deleted file mode 100644
index 92a245f..0000000
--- a/tajo-plan/src/main/java/org/apache/tajo/plan/serder/EvalTreeProtoSerializer.java
+++ /dev/null
@@ -1,310 +0,0 @@
-/*
- * Lisensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.tajo.plan.serder;
-
-import com.google.common.base.Preconditions;
-import com.google.common.collect.Maps;
-import com.google.protobuf.ByteString;
-import org.apache.tajo.datum.Datum;
-import org.apache.tajo.datum.IntervalDatum;
-import org.apache.tajo.plan.expr.*;
-
-import java.util.Map;
-import java.util.Stack;
-
-/**
- * It traverses an eval tree consisting of a number of {@link org.apache.tajo.plan.expr.EvalNode}
- * in a postfix traverse order. The postfix traverse order guarantees that all child nodes of some node N
- * were already visited when the node N is visited. This manner makes tree serialization possible in a simple logic.
- */
-public class EvalTreeProtoSerializer
- extends SimpleEvalNodeVisitor<EvalTreeProtoSerializer.EvalTreeProtoBuilderContext> {
-
- private static final EvalTreeProtoSerializer instance;
-
- static {
- instance = new EvalTreeProtoSerializer();
- }
-
- public static class EvalTreeProtoBuilderContext {
- private int seqId = 0;
- private Map<EvalNode, Integer> idMap = Maps.newHashMap();
- private PlanProto.EvalTree.Builder treeBuilder = PlanProto.EvalTree.newBuilder();
- }
-
- public static PlanProto.EvalTree serialize(EvalNode evalNode) {
- EvalTreeProtoSerializer.EvalTreeProtoBuilderContext context =
- new EvalTreeProtoSerializer.EvalTreeProtoBuilderContext();
- instance.visit(context, evalNode, new Stack<EvalNode>());
- return context.treeBuilder.build();
- }
-
- /**
- * Return child's serialization IDs. Usually, 0 is used for a child id of unary node or left child of
- * binary node. 1 is used for right child of binary node. Between will use 0 as predicand, 1 as begin, and 2 as
- * end eval node. For more detail, you should refer to each EvalNode implementation.
- *
- * @param context Context
- * @param evalNode EvalNode
- * @return The array of IDs which points to stored EvalNode.
- * @see org.apache.tajo.plan.expr.EvalNode
- */
- private int [] registerGetChildIds(EvalTreeProtoBuilderContext context, EvalNode evalNode) {
- int [] childIds = new int[evalNode.childNum()];
- for (int i = 0; i < evalNode.childNum(); i++) {
- if (context.idMap.containsKey(evalNode.getChild(i))) {
- childIds[i] = context.idMap.get(evalNode.getChild(i));
- } else {
- childIds[i] = context.seqId++;
- }
- }
- return childIds;
- }
-
- private PlanProto.EvalNode.Builder createEvalBuilder(EvalTreeProtoBuilderContext context, EvalNode node) {
- int sid; // serialization sequence id
- if (context.idMap.containsKey(node)) {
- sid = context.idMap.get(node);
- } else {
- sid = context.seqId++;
- context.idMap.put(node, sid);
- }
-
- PlanProto.EvalNode.Builder nodeBuilder = PlanProto.EvalNode.newBuilder();
- nodeBuilder.setId(sid);
- nodeBuilder.setDataType(node.getValueType());
- nodeBuilder.setType(PlanProto.EvalType.valueOf(node.getType().name()));
- return nodeBuilder;
- }
-
- @Override
- public EvalNode visitUnaryEval(EvalTreeProtoBuilderContext context, Stack<EvalNode> stack, UnaryEval unary) {
- // visiting and registering childs
- super.visitUnaryEval(context, stack, unary);
- int [] childIds = registerGetChildIds(context, unary);
-
- // building itself
- PlanProto.UnaryEval.Builder unaryBuilder = PlanProto.UnaryEval.newBuilder();
- unaryBuilder.setChildId(childIds[0]);
- if (unary.getType() == EvalType.IS_NULL) {
- IsNullEval isNullEval = (IsNullEval) unary;
- unaryBuilder.setNegative(isNullEval.isNot());
- } else if (unary.getType() == EvalType.SIGNED) {
- SignedEval signedEval = (SignedEval) unary;
- unaryBuilder.setNegative(signedEval.isNegative());
- } else if (unary.getType() == EvalType.CAST) {
- CastEval castEval = (CastEval) unary;
- unaryBuilder.setCastingType(castEval.getValueType());
- if (castEval.hasTimeZone()) {
- unaryBuilder.setTimezone(castEval.getTimezone().getID());
- }
- }
-
- // registering itself and building EvalNode
- PlanProto.EvalNode.Builder builder = createEvalBuilder(context, unary);
- builder.setUnary(unaryBuilder);
- context.treeBuilder.addNodes(builder);
- return unary;
- }
-
- @Override
- public EvalNode visitBinaryEval(EvalTreeProtoBuilderContext context, Stack<EvalNode> stack, BinaryEval binary) {
- // visiting and registering childs
- super.visitBinaryEval(context, stack, binary);
- int [] childIds = registerGetChildIds(context, binary);
-
- // building itself
- PlanProto.BinaryEval.Builder binaryBuilder = PlanProto.BinaryEval.newBuilder();
- binaryBuilder.setLhsId(childIds[0]);
- binaryBuilder.setRhsId(childIds[1]);
-
- // registering itself and building EvalNode
- PlanProto.EvalNode.Builder builder = createEvalBuilder(context, binary);
- builder.setBinary(binaryBuilder);
- context.treeBuilder.addNodes(builder);
- return binary;
- }
-
- @Override
- public EvalNode visitConst(EvalTreeProtoBuilderContext context, ConstEval constant, Stack<EvalNode> stack) {
- PlanProto.EvalNode.Builder builder = createEvalBuilder(context, constant);
- builder.setConst(PlanProto.ConstEval.newBuilder().setValue(serialize(constant.getValue())));
- context.treeBuilder.addNodes(builder);
- return constant;
- }
-
- @Override
- public EvalNode visitRowConstant(EvalTreeProtoBuilderContext context, RowConstantEval rowConst,
- Stack<EvalNode> stack) {
-
- PlanProto.RowConstEval.Builder rowConstBuilder = PlanProto.RowConstEval.newBuilder();
- for (Datum d : rowConst.getValues()) {
- rowConstBuilder.addValues(serialize(d));
- }
-
- PlanProto.EvalNode.Builder builder = createEvalBuilder(context, rowConst);
- builder.setRowConst(rowConstBuilder);
- context.treeBuilder.addNodes(builder);
- return rowConst;
- }
-
- public EvalNode visitField(EvalTreeProtoBuilderContext context, Stack<EvalNode> stack, FieldEval field) {
- PlanProto.EvalNode.Builder builder = createEvalBuilder(context, field);
- builder.setField(field.getColumnRef().getProto());
- context.treeBuilder.addNodes(builder);
- return field;
- }
-
- public EvalNode visitBetween(EvalTreeProtoBuilderContext context, BetweenPredicateEval between,
- Stack<EvalNode> stack) {
- // visiting and registering childs
- super.visitBetween(context, between, stack);
- int [] childIds = registerGetChildIds(context, between);
- Preconditions.checkState(childIds.length == 3, "Between must have three childs, but there are " + childIds.length
- + " child nodes");
-
- // building itself
- PlanProto.BetweenEval.Builder betweenBuilder = PlanProto.BetweenEval.newBuilder();
- betweenBuilder.setNegative(between.isNot());
- betweenBuilder.setSymmetric(between.isSymmetric());
- betweenBuilder.setPredicand(childIds[0]);
- betweenBuilder.setBegin(childIds[1]);
- betweenBuilder.setEnd(childIds[2]);
-
- // registering itself and building EvalNode
- PlanProto.EvalNode.Builder builder = createEvalBuilder(context, between);
- builder.setBetween(betweenBuilder);
- context.treeBuilder.addNodes(builder);
- return between;
- }
-
- public EvalNode visitCaseWhen(EvalTreeProtoBuilderContext context, CaseWhenEval caseWhen, Stack<EvalNode> stack) {
- // visiting and registering childs
- super.visitCaseWhen(context, caseWhen, stack);
- int [] childIds = registerGetChildIds(context, caseWhen);
- Preconditions.checkState(childIds.length > 0, "Case When must have at least one child, but there is no child");
-
- // building itself
- PlanProto.CaseWhenEval.Builder caseWhenBuilder = PlanProto.CaseWhenEval.newBuilder();
- int ifCondsNum = childIds.length - (caseWhen.hasElse() ? 1 : 0);
- for (int i = 0; i < ifCondsNum; i++) {
- caseWhenBuilder.addIfConds(childIds[i]);
- }
- if (caseWhen.hasElse()) {
- caseWhenBuilder.setElse(childIds[childIds.length - 1]);
- }
-
- // registering itself and building EvalNode
- PlanProto.EvalNode.Builder builder = createEvalBuilder(context, caseWhen);
- builder.setCasewhen(caseWhenBuilder);
- context.treeBuilder.addNodes(builder);
-
- return caseWhen;
- }
-
- public EvalNode visitIfThen(EvalTreeProtoBuilderContext context, CaseWhenEval.IfThenEval ifCond,
- Stack<EvalNode> stack) {
- // visiting and registering childs
- super.visitIfThen(context, ifCond, stack);
- int [] childIds = registerGetChildIds(context, ifCond);
-
- // building itself
- PlanProto.IfCondEval.Builder ifCondBuilder = PlanProto.IfCondEval.newBuilder();
- ifCondBuilder.setCondition(childIds[0]);
- ifCondBuilder.setThen(childIds[1]);
-
- // registering itself and building EvalNode
- PlanProto.EvalNode.Builder builder = createEvalBuilder(context, ifCond);
- builder.setIfCond(ifCondBuilder);
- context.treeBuilder.addNodes(builder);
-
- return ifCond;
- }
-
- public EvalNode visitFuncCall(EvalTreeProtoBuilderContext context, FunctionEval function, Stack<EvalNode> stack) {
- // visiting and registering childs
- super.visitFuncCall(context, function, stack);
- int [] childIds = registerGetChildIds(context, function);
-
- // building itself
- PlanProto.FunctionEval.Builder funcBuilder = PlanProto.FunctionEval.newBuilder();
- funcBuilder.setFuncion(function.getFuncDesc().getProto());
- for (int i = 0; i < childIds.length; i++) {
- funcBuilder.addParamIds(childIds[i]);
- }
-
- // registering itself and building EvalNode
- PlanProto.EvalNode.Builder builder = createEvalBuilder(context, function);
- builder.setFunction(funcBuilder);
- context.treeBuilder.addNodes(builder);
-
- return function;
- }
-
- public static PlanProto.Datum serialize(Datum datum) {
- PlanProto.Datum.Builder builder = PlanProto.Datum.newBuilder();
-
- builder.setType(datum.type());
-
- switch (datum.type()) {
- case NULL_TYPE:
- break;
- case BOOLEAN:
- builder.setBoolean(datum.asBool());
- break;
- case INT1:
- case INT2:
- case INT4:
- case DATE:
- builder.setInt4(datum.asInt4());
- break;
- case INT8:
- case TIMESTAMP:
- case TIME:
- builder.setInt8(datum.asInt8());
- break;
- case FLOAT4:
- builder.setFloat4(datum.asFloat4());
- break;
- case FLOAT8:
- builder.setFloat8(datum.asFloat8());
- break;
- case CHAR:
- case VARCHAR:
- case TEXT:
- builder.setText(datum.asChars());
- break;
- case BINARY:
- case BLOB:
- builder.setBlob(ByteString.copyFrom(datum.asByteArray()));
- break;
- case INTERVAL:
- IntervalDatum interval = (IntervalDatum) datum;
- PlanProto.Interval.Builder intervalBuilder = PlanProto.Interval.newBuilder();
- intervalBuilder.setMonth(interval.getMonths());
- intervalBuilder.setMsec(interval.getMilliSeconds());
- builder.setInterval(intervalBuilder);
- break;
- default:
- throw new RuntimeException("Unknown data type: " + datum.type().name());
- }
-
- return builder.build();
- }
-}
[5/8] tajo git commit: TAJO-1176: Implements queryable virtual tables
for catalog information
Posted by ji...@apache.org.
http://git-wip-us.apache.org/repos/asf/tajo/blob/021a6f0b/tajo-catalog/tajo-catalog-server/src/main/java/org/apache/tajo/catalog/store/MemStore.java
----------------------------------------------------------------------
diff --git a/tajo-catalog/tajo-catalog-server/src/main/java/org/apache/tajo/catalog/store/MemStore.java b/tajo-catalog/tajo-catalog-server/src/main/java/org/apache/tajo/catalog/store/MemStore.java
index 9575c13..51f65ee 100644
--- a/tajo-catalog/tajo-catalog-server/src/main/java/org/apache/tajo/catalog/store/MemStore.java
+++ b/tajo-catalog/tajo-catalog-server/src/main/java/org/apache/tajo/catalog/store/MemStore.java
@@ -22,6 +22,7 @@
package org.apache.tajo.catalog.store;
import com.google.common.collect.Maps;
+
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.tajo.TajoConstants;
@@ -29,7 +30,17 @@ import org.apache.tajo.catalog.CatalogUtil;
import org.apache.tajo.catalog.FunctionDesc;
import org.apache.tajo.catalog.exception.*;
import org.apache.tajo.catalog.proto.CatalogProtos;
+import org.apache.tajo.catalog.proto.CatalogProtos.ColumnProto;
+import org.apache.tajo.catalog.proto.CatalogProtos.DatabaseProto;
import org.apache.tajo.catalog.proto.CatalogProtos.IndexDescProto;
+import org.apache.tajo.catalog.proto.CatalogProtos.IndexProto;
+import org.apache.tajo.catalog.proto.CatalogProtos.TableDescProto;
+import org.apache.tajo.catalog.proto.CatalogProtos.TableDescriptorProto;
+import org.apache.tajo.catalog.proto.CatalogProtos.TableOptionProto;
+import org.apache.tajo.catalog.proto.CatalogProtos.TablePartitionProto;
+import org.apache.tajo.catalog.proto.CatalogProtos.TableStatsProto;
+import org.apache.tajo.rpc.protocolrecords.PrimitiveProtos.KeyValueProto;
+import org.apache.tajo.util.TUtil;
import java.io.IOException;
import java.util.*;
@@ -84,6 +95,22 @@ public class MemStore implements CatalogStore {
public Collection<String> getAllTablespaceNames() throws CatalogException {
return tablespaces.keySet();
}
+
+ @Override
+ public List<TablespaceProto> getTablespaces() throws CatalogException {
+ List<TablespaceProto> tablespaceList = TUtil.newList();
+ int tablespaceId = 0;
+
+ for (String spaceName: tablespaces.keySet()) {
+ TablespaceProto.Builder builder = TablespaceProto.newBuilder();
+ builder.setSpaceName(spaceName);
+ builder.setUri(tablespaces.get(spaceName));
+ builder.setId(tablespaceId++);
+ tablespaceList.add(builder.build());
+ }
+
+ return tablespaceList;
+ }
@Override
public TablespaceProto getTablespace(String spaceName) throws CatalogException {
@@ -139,6 +166,24 @@ public class MemStore implements CatalogStore {
public Collection<String> getAllDatabaseNames() throws CatalogException {
return databases.keySet();
}
+
+ @Override
+ public List<DatabaseProto> getAllDatabases() throws CatalogException {
+ List<DatabaseProto> databaseList = new ArrayList<DatabaseProto>();
+ int dbId = 0;
+
+ for (String databaseName: databases.keySet()) {
+ DatabaseProto.Builder builder = DatabaseProto.newBuilder();
+
+ builder.setId(dbId++);
+ builder.setName(databaseName);
+ builder.setSpaceId(0);
+
+ databaseList.add(builder.build());
+ }
+
+ return databaseList;
+ }
/**
* Get a database namespace from a Map instance.
@@ -303,6 +348,118 @@ public class MemStore implements CatalogStore {
Map<String, CatalogProtos.TableDescProto> database = checkAndGetDatabaseNS(databases, databaseName);
return new ArrayList<String>(database.keySet());
}
+
+ // Returns a descriptor for every table in every database. Synthetic ids:
+ // dbId advances once per database (same keySet() order as getAllDatabases),
+ // and tableId advances once per table GLOBALLY across all databases —
+ // sibling methods that emit a per-table "tid" must use the same numbering
+ // for joins against this output to line up. Table names are sorted so the
+ // numbering is stable within a database.
+ @Override
+ public List<TableDescriptorProto> getAllTables() throws CatalogException {
+ List<TableDescriptorProto> tableList = new ArrayList<CatalogProtos.TableDescriptorProto>();
+ int dbId = 0, tableId = 0;
+
+ for (String databaseName: databases.keySet()) {
+ Map<String, TableDescProto> tables = databases.get(databaseName);
+ List<String> tableNameList = TUtil.newList(tables.keySet());
+ Collections.sort(tableNameList);
+
+ for (String tableName: tableNameList) {
+ TableDescProto tableDesc = tables.get(tableName);
+ TableDescriptorProto.Builder builder = TableDescriptorProto.newBuilder();
+
+ builder.setDbId(dbId);
+ builder.setTid(tableId);
+ builder.setName(tableName);
+ builder.setPath(tableDesc.getPath());
+ builder.setTableType(tableDesc.getIsExternal()?"EXTERNAL":"BASE");
+ builder.setStoreType(tableDesc.getMeta().getStoreType().toString());
+
+ tableList.add(builder.build());
+ tableId++;
+ }
+ dbId++;
+ }
+
+ return tableList;
+ }
+
+ @Override
+ public List<TableOptionProto> getAllTableOptions() throws CatalogException {
+ List<TableOptionProto> optionList = new ArrayList<CatalogProtos.TableOptionProto>();
+ int tid = 0;
+
+ for (String databaseName: databases.keySet()) {
+ Map<String, TableDescProto> tables = databases.get(databaseName);
+ List<String> tableNameList = TUtil.newList(tables.keySet());
+ Collections.sort(tableNameList);
+
+ for (String tableName: tableNameList) {
+ TableDescProto table = tables.get(tableName);
+ List<KeyValueProto> keyValueList = table.getMeta().getParams().getKeyvalList();
+
+ for (KeyValueProto keyValue: keyValueList) {
+ TableOptionProto.Builder builder = TableOptionProto.newBuilder();
+
+ builder.setTid(tid);
+ builder.setKeyval(keyValue);
+
+ optionList.add(builder.build());
+ }
+ }
+ tid++;
+ }
+
+ return optionList;
+ }
+
+ // Returns row/byte statistics for every table, tagged with the synthetic
+ // table id so rows join against getAllTables().
+ @Override
+ public List<TableStatsProto> getAllTableStats() throws CatalogException {
+ List<TableStatsProto> statList = new ArrayList<CatalogProtos.TableStatsProto>();
+ int tid = 0;
+
+ for (String databaseName: databases.keySet()) {
+ Map<String, TableDescProto> tables = databases.get(databaseName);
+ List<String> tableNameList = TUtil.newList(tables.keySet());
+ Collections.sort(tableNameList);
+
+ for (String tableName: tableNameList) {
+ TableDescProto table = tables.get(tableName);
+ TableStatsProto.Builder builder = TableStatsProto.newBuilder();
+
+ builder.setTid(tid);
+ builder.setNumRows(table.getStats().getNumRows());
+ builder.setNumBytes(table.getStats().getNumBytes());
+
+ statList.add(builder.build());
+ // BUGFIX: advance tid once per TABLE (was once per database), matching
+ // the numbering produced by getAllTables().
+ tid++;
+ }
+ }
+
+ return statList;
+ }
+
+ // Returns every column of every table, tagged with the synthetic table id
+ // so rows join against getAllTables().
+ @Override
+ public List<ColumnProto> getAllColumns() throws CatalogException {
+ List<ColumnProto> columnList = new ArrayList<CatalogProtos.ColumnProto>();
+ int tid = 0;
+
+ for (String databaseName: databases.keySet()) {
+ Map<String, TableDescProto> tables = databases.get(databaseName);
+ List<String> tableNameList = TUtil.newList(tables.keySet());
+ Collections.sort(tableNameList);
+
+ for (String tableName: tableNameList) {
+ TableDescProto tableDesc = tables.get(tableName);
+
+ for (ColumnProto column: tableDesc.getSchema().getFieldsList()) {
+ ColumnProto.Builder builder = ColumnProto.newBuilder();
+ builder.setTid(tid);
+ builder.setName(column.getName());
+ builder.setDataType(column.getDataType());
+ columnList.add(builder.build());
+ }
+ // BUGFIX: advance tid once per TABLE (was once per database), matching
+ // the numbering produced by getAllTables().
+ tid++;
+ }
+ }
+
+ return columnList;
+ }
@Override
public void addPartitionMethod(CatalogProtos.PartitionMethodProto partitionMethodProto) throws CatalogException {
@@ -370,6 +527,11 @@ public class MemStore implements CatalogStore {
public void dropPartitions(String tableName) throws CatalogException {
throw new RuntimeException("not supported!");
}
+
+ // Partition enumeration is not implemented by the in-memory store; matches
+ // the existing dropPartitions behavior above, which also rejects the call.
+ @Override
+ public List<TablePartitionProto> getAllPartitions() throws CatalogException {
+ throw new UnsupportedOperationException();
+ }
/* (non-Javadoc)
* @see CatalogStore#createIndex(nta.catalog.proto.CatalogProtos.IndexDescProto)
@@ -455,6 +617,33 @@ public class MemStore implements CatalogStore {
return protos.toArray(new IndexDescProto[protos.size()]);
}
+
+ // Returns one IndexProto per index across all databases. The tri-state
+ // has*()/get*() guards collapse "unset" boolean flags to false.
+ // NOTE(review): DbId and TId are never set on the builder here, although
+ // consumers (e.g. the system-table scanner) read getDbId()/getTId() —
+ // confirm those fields are optional with usable defaults, or populate them.
+ @Override
+ public List<IndexProto> getAllIndexes() throws CatalogException {
+ List<IndexProto> indexList = new ArrayList<CatalogProtos.IndexProto>();
+ // Renamed from "databases" to avoid shadowing the field of the same name.
+ Set<String> databaseNames = indexes.keySet();
+
+ for (String databaseName: databaseNames) {
+ Map<String, IndexDescProto> indexMap = indexes.get(databaseName);
+
+ for (String indexName: indexMap.keySet()) {
+ IndexDescProto indexDesc = indexMap.get(indexName);
+ IndexProto.Builder builder = IndexProto.newBuilder();
+
+ builder.setColumnName(indexDesc.getColumn().getName());
+ builder.setDataType(indexDesc.getColumn().getDataType().getType().toString());
+ builder.setIndexName(indexName);
+ builder.setIndexType(indexDesc.getIndexMethod().toString());
+ builder.setIsAscending(indexDesc.hasIsAscending() && indexDesc.getIsAscending());
+ builder.setIsClustered(indexDesc.hasIsClustered() && indexDesc.getIsClustered());
+ builder.setIsUnique(indexDesc.hasIsUnique() && indexDesc.getIsUnique());
+
+ indexList.add(builder.build());
+ }
+ }
+
+ return indexList;
+ }
@Override
public void addFunction(FunctionDesc func) throws CatalogException {
http://git-wip-us.apache.org/repos/asf/tajo/blob/021a6f0b/tajo-catalog/tajo-catalog-server/src/test/java/org/apache/tajo/catalog/TestCatalog.java
----------------------------------------------------------------------
diff --git a/tajo-catalog/tajo-catalog-server/src/test/java/org/apache/tajo/catalog/TestCatalog.java b/tajo-catalog/tajo-catalog-server/src/test/java/org/apache/tajo/catalog/TestCatalog.java
index a044d64..43c6f7d 100644
--- a/tajo-catalog/tajo-catalog-server/src/test/java/org/apache/tajo/catalog/TestCatalog.java
+++ b/tajo-catalog/tajo-catalog-server/src/test/java/org/apache/tajo/catalog/TestCatalog.java
@@ -19,8 +19,10 @@
package org.apache.tajo.catalog;
import com.google.common.collect.Sets;
+
import org.apache.hadoop.fs.Path;
import org.apache.tajo.TajoConstants;
+import org.apache.tajo.catalog.dictionary.InfoSchemaMetadataDictionary;
import org.apache.tajo.catalog.exception.CatalogException;
import org.apache.tajo.catalog.exception.NoSuchFunctionException;
import org.apache.tajo.catalog.store.PostgreSQLStore;
@@ -53,7 +55,6 @@ import static org.apache.tajo.catalog.proto.CatalogProtos.AlterTablespaceProto;
import static org.apache.tajo.catalog.proto.CatalogProtos.AlterTablespaceProto.AlterTablespaceType;
import static org.apache.tajo.catalog.proto.CatalogProtos.AlterTablespaceProto.SetLocation;
import static org.junit.Assert.*;
-import static org.junit.Assert.assertEquals;
public class TestCatalog {
static final String FieldName1="f1";
@@ -211,6 +212,7 @@ public class TestCatalog {
@Test
public void testCreateAndDropManyDatabases() throws Exception {
List<String> createdDatabases = new ArrayList<String>();
+ InfoSchemaMetadataDictionary dictionary = new InfoSchemaMetadataDictionary();
String namePrefix = "database_";
final int NUM = 10;
for (int i = 0; i < NUM; i++) {
@@ -223,10 +225,11 @@ public class TestCatalog {
Collection<String> allDatabaseNames = catalog.getAllDatabaseNames();
for (String databaseName : allDatabaseNames) {
- assertTrue(databaseName.equals(DEFAULT_DATABASE_NAME) || createdDatabases.contains(databaseName));
+ assertTrue(databaseName.equals(DEFAULT_DATABASE_NAME) || createdDatabases.contains(databaseName) ||
+ dictionary.isSystemDatabase(databaseName));
}
- // additional one is 'default' database.
- assertEquals(NUM + 1, allDatabaseNames.size());
+ // additional ones are 'default' and 'system' databases.
+ assertEquals(NUM + 2, allDatabaseNames.size());
Collections.shuffle(createdDatabases);
for (String tobeDropped : createdDatabases) {
@@ -351,8 +354,8 @@ public class TestCatalog {
}
}
- // Finally, only default database will remain. So, its result is 1.
- assertEquals(1, catalog.getAllDatabaseNames().size());
+ // Finally, only the default and system databases will remain, so the result is 2.
+ assertEquals(2, catalog.getAllDatabaseNames().size());
}
@Test
http://git-wip-us.apache.org/repos/asf/tajo/blob/021a6f0b/tajo-core/src/main/java/org/apache/tajo/master/NonForwardQueryResultFileScanner.java
----------------------------------------------------------------------
diff --git a/tajo-core/src/main/java/org/apache/tajo/master/NonForwardQueryResultFileScanner.java b/tajo-core/src/main/java/org/apache/tajo/master/NonForwardQueryResultFileScanner.java
new file mode 100644
index 0000000..d6ea459
--- /dev/null
+++ b/tajo-core/src/main/java/org/apache/tajo/master/NonForwardQueryResultFileScanner.java
@@ -0,0 +1,164 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.tajo.master;
+
+import com.google.protobuf.ByteString;
+
+import org.apache.tajo.ExecutionBlockId;
+import org.apache.tajo.QueryId;
+import org.apache.tajo.TaskAttemptId;
+import org.apache.tajo.TaskId;
+import org.apache.tajo.catalog.Schema;
+import org.apache.tajo.catalog.TableDesc;
+import org.apache.tajo.catalog.proto.CatalogProtos.FragmentProto;
+import org.apache.tajo.conf.TajoConf;
+import org.apache.tajo.plan.logical.ScanNode;
+import org.apache.tajo.engine.planner.physical.SeqScanExec;
+import org.apache.tajo.engine.query.QueryContext;
+import org.apache.tajo.storage.RowStoreUtil;
+import org.apache.tajo.storage.RowStoreUtil.RowStoreEncoder;
+import org.apache.tajo.storage.StorageManager;
+import org.apache.tajo.storage.Tuple;
+import org.apache.tajo.storage.fragment.Fragment;
+import org.apache.tajo.storage.fragment.FragmentConvertor;
+import org.apache.tajo.worker.TaskAttemptContext;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+/**
+ * A {@link NonForwardQueryResultScanner} that serves result rows by scanning
+ * table files directly, fetching splits in batches of up to
+ * {@code MAX_FRAGMENT_NUM_PER_SCAN} fragments and re-initializing the
+ * underlying {@link SeqScanExec} when a batch is exhausted.
+ */
+public class NonForwardQueryResultFileScanner implements NonForwardQueryResultScanner {
+ private static final int MAX_FRAGMENT_NUM_PER_SCAN = 100;
+
+ private QueryId queryId;
+ private String sessionId;
+ private SeqScanExec scanExec;
+ private TableDesc tableDesc;
+ private RowStoreEncoder rowEncoder;
+ // Hard cap on total rows this scanner may return (across all fetches).
+ private int maxRow;
+ // Rows returned so far, compared against maxRow.
+ private int currentNumRows;
+ private TaskAttemptContext taskContext;
+ private TajoConf tajoConf;
+ private ScanNode scanNode;
+
+ // Offset of the next fragment batch to request from the storage manager.
+ private int currentFragmentIndex = 0;
+
+ public NonForwardQueryResultFileScanner(TajoConf tajoConf, String sessionId, QueryId queryId, ScanNode scanNode,
+ TableDesc tableDesc, int maxRow) throws IOException {
+ this.tajoConf = tajoConf;
+ this.sessionId = sessionId;
+ this.queryId = queryId;
+ this.scanNode = scanNode;
+ this.tableDesc = tableDesc;
+ this.maxRow = maxRow;
+ this.rowEncoder = RowStoreUtil.createEncoder(tableDesc.getLogicalSchema());
+ }
+
+ public void init() throws IOException {
+ initSeqScanExec();
+ }
+
+ // Requests the next batch of fragments and, if any exist, builds and
+ // initializes a fresh SeqScanExec over them. Leaves scanExec null when the
+ // table is exhausted.
+ private void initSeqScanExec() throws IOException {
+ List<Fragment> fragments = StorageManager.getStorageManager(tajoConf, tableDesc.getMeta().getStoreType())
+ .getNonForwardSplit(tableDesc, currentFragmentIndex, MAX_FRAGMENT_NUM_PER_SCAN);
+
+ if (fragments != null && !fragments.isEmpty()) {
+ FragmentProto[] fragmentProtos = FragmentConvertor.toFragmentProtoArray(fragments.toArray(new Fragment[] {}));
+ this.taskContext = new TaskAttemptContext(
+ new QueryContext(tajoConf), null,
+ new TaskAttemptId(new TaskId(new ExecutionBlockId(queryId, 1), 0), 0),
+ fragmentProtos, null);
+ try {
+ // The scan node must be cloned because SeqScanExec may rewrite its
+ // targets when scanning a partitioned table.
+ scanExec = new SeqScanExec(taskContext, (ScanNode) scanNode.clone(), fragmentProtos);
+ } catch (CloneNotSupportedException e) {
+ throw new IOException(e.getMessage(), e);
+ }
+ scanExec.init();
+ currentFragmentIndex += fragments.size();
+ }
+ }
+
+ public QueryId getQueryId() {
+ return queryId;
+ }
+
+ public String getSessionId() {
+ return sessionId;
+ }
+
+ public void setScanExec(SeqScanExec scanExec) {
+ this.scanExec = scanExec;
+ }
+
+ public TableDesc getTableDesc() {
+ return tableDesc;
+ }
+
+ public void close() throws Exception {
+ if (scanExec != null) {
+ scanExec.close();
+ scanExec = null;
+ }
+ }
+
+ // Returns up to fetchRowNum encoded rows. When the current executor runs
+ // dry it is closed and replaced with one over the next fragment batch; an
+ // empty list signals end of data. Never returns more than maxRow rows in
+ // total across all calls.
+ public List<ByteString> getNextRows(int fetchRowNum) throws IOException {
+ List<ByteString> rows = new ArrayList<ByteString>();
+ if (scanExec == null) {
+ return rows;
+ }
+ int rowCount = 0;
+ while (true) {
+ Tuple tuple = scanExec.next();
+ if (tuple == null) {
+ // Current batch exhausted: roll over to the next fragment batch and
+ // retry once; if still no tuple, the whole table is exhausted.
+ scanExec.close();
+ scanExec = null;
+ initSeqScanExec();
+ if (scanExec != null) {
+ tuple = scanExec.next();
+ }
+ if (tuple == null) {
+ if (scanExec != null) {
+ scanExec.close();
+ scanExec = null;
+ }
+ break;
+ }
+ }
+ rows.add(ByteString.copyFrom((rowEncoder.toBytes(tuple))));
+ rowCount++;
+ currentNumRows++;
+ if (rowCount >= fetchRowNum) {
+ break;
+ }
+ // Enforce the overall row limit; once reached the scanner stays closed.
+ if (currentNumRows >= maxRow) {
+ scanExec.close();
+ scanExec = null;
+ break;
+ }
+ }
+ return rows;
+ }
+
+ @Override
+ public Schema getLogicalSchema() {
+ return tableDesc.getLogicalSchema();
+ }
+}
http://git-wip-us.apache.org/repos/asf/tajo/blob/021a6f0b/tajo-core/src/main/java/org/apache/tajo/master/NonForwardQueryResultScanner.java
----------------------------------------------------------------------
diff --git a/tajo-core/src/main/java/org/apache/tajo/master/NonForwardQueryResultScanner.java b/tajo-core/src/main/java/org/apache/tajo/master/NonForwardQueryResultScanner.java
index aced80c..7e7d705 100644
--- a/tajo-core/src/main/java/org/apache/tajo/master/NonForwardQueryResultScanner.java
+++ b/tajo-core/src/main/java/org/apache/tajo/master/NonForwardQueryResultScanner.java
@@ -18,149 +18,29 @@
package org.apache.tajo.master;
-import com.google.protobuf.ByteString;
-import org.apache.tajo.ExecutionBlockId;
-import org.apache.tajo.QueryId;
-import org.apache.tajo.TaskAttemptId;
-import org.apache.tajo.TaskId;
-import org.apache.tajo.catalog.TableDesc;
-import org.apache.tajo.catalog.proto.CatalogProtos.FragmentProto;
-import org.apache.tajo.conf.TajoConf;
-import org.apache.tajo.plan.logical.ScanNode;
-import org.apache.tajo.engine.planner.physical.SeqScanExec;
-import org.apache.tajo.engine.query.QueryContext;
-import org.apache.tajo.storage.RowStoreUtil;
-import org.apache.tajo.storage.RowStoreUtil.RowStoreEncoder;
-import org.apache.tajo.storage.StorageManager;
-import org.apache.tajo.storage.Tuple;
-import org.apache.tajo.storage.fragment.Fragment;
-import org.apache.tajo.storage.fragment.FragmentConvertor;
-import org.apache.tajo.worker.TaskAttemptContext;
-
import java.io.IOException;
-import java.util.ArrayList;
import java.util.List;
-public class NonForwardQueryResultScanner {
- private static final int MAX_FRAGMENT_NUM_PER_SCAN = 100;
-
- private QueryId queryId;
- private String sessionId;
- private SeqScanExec scanExec;
- private TableDesc tableDesc;
- private RowStoreEncoder rowEncoder;
- private int maxRow;
- private int currentNumRows;
- private TaskAttemptContext taskContext;
- private TajoConf tajoConf;
- private ScanNode scanNode;
-
- private int currentFragmentIndex = 0;
-
- public NonForwardQueryResultScanner(TajoConf tajoConf, String sessionId,
- QueryId queryId,
- ScanNode scanNode,
- TableDesc tableDesc,
- int maxRow) throws IOException {
- this.tajoConf = tajoConf;
- this.sessionId = sessionId;
- this.queryId = queryId;
- this.scanNode = scanNode;
- this.tableDesc = tableDesc;
- this.maxRow = maxRow;
-
- this.rowEncoder = RowStoreUtil.createEncoder(tableDesc.getLogicalSchema());
- }
-
- public void init() throws IOException {
- initSeqScanExec();
- }
-
- private void initSeqScanExec() throws IOException {
- List<Fragment> fragments = StorageManager.getStorageManager(tajoConf, tableDesc.getMeta().getStoreType())
- .getNonForwardSplit(tableDesc, currentFragmentIndex, MAX_FRAGMENT_NUM_PER_SCAN);
-
- if (fragments != null && !fragments.isEmpty()) {
- FragmentProto[] fragmentProtos = FragmentConvertor.toFragmentProtoArray(fragments.toArray(new Fragment[]{}));
- this.taskContext = new TaskAttemptContext(
- new QueryContext(tajoConf), null,
- new TaskAttemptId(new TaskId(new ExecutionBlockId(queryId, 1), 0), 0),
- fragmentProtos, null);
-
- try {
- // scanNode must be clone cause SeqScanExec change target in the case of a partitioned table.
- scanExec = new SeqScanExec(taskContext, (ScanNode)scanNode.clone(), fragmentProtos);
- } catch (CloneNotSupportedException e) {
- throw new IOException(e.getMessage(), e);
- }
- scanExec.init();
- currentFragmentIndex += fragments.size();
- }
- }
-
- public QueryId getQueryId() {
- return queryId;
- }
-
- public String getSessionId() {
- return sessionId;
- }
-
- public void setScanExec(SeqScanExec scanExec) {
- this.scanExec = scanExec;
- }
+import org.apache.tajo.QueryId;
+import org.apache.tajo.catalog.Schema;
+import org.apache.tajo.catalog.TableDesc;
- public TableDesc getTableDesc() {
- return tableDesc;
- }
+import com.google.protobuf.ByteString;
- public void close() throws Exception {
- if (scanExec != null) {
- scanExec.close();
- scanExec = null;
- }
- }
+public interface NonForwardQueryResultScanner {
- public List<ByteString> getNextRows(int fetchRowNum) throws IOException {
- List<ByteString> rows = new ArrayList<ByteString>();
- if (scanExec == null) {
- return rows;
- }
- int rowCount = 0;
+ public void close() throws Exception;
- while (true) {
- Tuple tuple = scanExec.next();
- if (tuple == null) {
- scanExec.close();
- scanExec = null;
+ public Schema getLogicalSchema();
- initSeqScanExec();
- if (scanExec != null) {
- tuple = scanExec.next();
- }
- if (tuple == null) {
- if (scanExec != null ) {
- scanExec.close();
- scanExec = null;
- }
+ public List<ByteString> getNextRows(int fetchRowNum) throws IOException;
- break;
- }
- }
- rows.add(ByteString.copyFrom((rowEncoder.toBytes(tuple))));
- rowCount++;
- currentNumRows++;
- if (rowCount >= fetchRowNum) {
- break;
- }
+ public QueryId getQueryId();
+
+ public String getSessionId();
+
+ public TableDesc getTableDesc();
- if (currentNumRows >= maxRow) {
- scanExec.close();
- scanExec = null;
- break;
- }
- }
+ public void init() throws IOException;
- return rows;
- }
}
http://git-wip-us.apache.org/repos/asf/tajo/blob/021a6f0b/tajo-core/src/main/java/org/apache/tajo/master/NonForwardQueryResultSystemScanner.java
----------------------------------------------------------------------
diff --git a/tajo-core/src/main/java/org/apache/tajo/master/NonForwardQueryResultSystemScanner.java b/tajo-core/src/main/java/org/apache/tajo/master/NonForwardQueryResultSystemScanner.java
new file mode 100644
index 0000000..c6466f5
--- /dev/null
+++ b/tajo-core/src/main/java/org/apache/tajo/master/NonForwardQueryResultSystemScanner.java
@@ -0,0 +1,616 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.tajo.master;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Stack;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.tajo.QueryId;
+import org.apache.tajo.TaskAttemptId;
+import org.apache.tajo.TaskId;
+import org.apache.tajo.catalog.CatalogUtil;
+import org.apache.tajo.catalog.Column;
+import org.apache.tajo.catalog.Schema;
+import org.apache.tajo.catalog.TableDesc;
+import org.apache.tajo.catalog.TableMeta;
+import org.apache.tajo.catalog.proto.CatalogProtos.ColumnProto;
+import org.apache.tajo.catalog.proto.CatalogProtos.DatabaseProto;
+import org.apache.tajo.catalog.proto.CatalogProtos.IndexProto;
+import org.apache.tajo.catalog.proto.CatalogProtos.StoreType;
+import org.apache.tajo.catalog.proto.CatalogProtos.TableDescriptorProto;
+import org.apache.tajo.catalog.proto.CatalogProtos.TableOptionProto;
+import org.apache.tajo.catalog.proto.CatalogProtos.TablePartitionProto;
+import org.apache.tajo.catalog.proto.CatalogProtos.TableStatsProto;
+import org.apache.tajo.catalog.proto.CatalogProtos.TablespaceProto;
+import org.apache.tajo.catalog.statistics.TableStats;
+import org.apache.tajo.common.TajoDataTypes.DataType;
+import org.apache.tajo.conf.TajoConf;
+import org.apache.tajo.datum.DatumFactory;
+import org.apache.tajo.engine.codegen.CompilationError;
+import org.apache.tajo.engine.planner.PhysicalPlannerImpl;
+import org.apache.tajo.engine.planner.Projector;
+import org.apache.tajo.engine.planner.global.ExecutionBlock;
+import org.apache.tajo.engine.planner.global.ExecutionBlockCursor;
+import org.apache.tajo.engine.planner.global.GlobalPlanner;
+import org.apache.tajo.engine.planner.global.MasterPlan;
+import org.apache.tajo.engine.planner.physical.PhysicalExec;
+import org.apache.tajo.engine.query.QueryContext;
+import org.apache.tajo.master.TajoMaster.MasterContext;
+import org.apache.tajo.plan.LogicalPlan;
+import org.apache.tajo.plan.PlanningException;
+import org.apache.tajo.plan.expr.EvalNode;
+import org.apache.tajo.plan.logical.IndexScanNode;
+import org.apache.tajo.plan.logical.LogicalNode;
+import org.apache.tajo.plan.logical.ScanNode;
+import org.apache.tajo.storage.RowStoreUtil;
+import org.apache.tajo.storage.Tuple;
+import org.apache.tajo.storage.VTuple;
+import org.apache.tajo.storage.RowStoreUtil.RowStoreEncoder;
+import org.apache.tajo.util.KeyValueSet;
+import org.apache.tajo.util.TUtil;
+import org.apache.tajo.worker.TaskAttemptContext;
+
+import com.google.protobuf.ByteString;
+
+public class NonForwardQueryResultSystemScanner implements NonForwardQueryResultScanner {
+
+ private final Log LOG = LogFactory.getLog(getClass());
+
+ private MasterContext masterContext;
+ private LogicalPlan logicalPlan;
+ private final QueryId queryId;
+ private final String sessionId;
+ private TaskAttemptContext taskContext;
+ private int currentRow;
+ private long maxRow;
+ private TableDesc tableDesc;
+ private Schema outSchema;
+ private RowStoreEncoder encoder;
+ private PhysicalExec physicalExec;
+
+ /**
+ * Creates a scanner over catalog system tables.
+ *
+ * @param context master context providing conf and catalog access
+ * @param plan the logical plan of the system-table query
+ * @param queryId id reported back to the client for this scan
+ * @param sessionId owning session's id
+ * @param maxRow maximum number of rows this scanner may return
+ */
+ public NonForwardQueryResultSystemScanner(MasterContext context, LogicalPlan plan, QueryId queryId,
+ String sessionId, int maxRow) {
+ masterContext = context;
+ logicalPlan = plan;
+ this.queryId = queryId;
+ this.sessionId = sessionId;
+ this.maxRow = maxRow;
+
+ }
+
+ // Builds a master plan for the system-table query, locates its single leaf
+ // execution block, and instantiates and initializes a local physical
+ // executor for it. Planning failures surface as RuntimeException.
+ @Override
+ public void init() throws IOException {
+ QueryContext queryContext = new QueryContext(masterContext.getConf());
+ currentRow = 0;
+
+ MasterPlan masterPlan = new MasterPlan(queryId, queryContext, logicalPlan);
+ GlobalPlanner globalPlanner = new GlobalPlanner(masterContext.getConf(), masterContext.getCatalog());
+ try {
+ globalPlanner.build(masterPlan);
+ } catch (PlanningException e) {
+ throw new RuntimeException(e);
+ }
+
+ ExecutionBlockCursor cursor = new ExecutionBlockCursor(masterPlan);
+ ExecutionBlock leafBlock = null;
+ while (cursor.hasNext()) {
+ ExecutionBlock block = cursor.nextBlock();
+ if (masterPlan.isLeaf(block)) {
+ leafBlock = block;
+ break;
+ }
+ }
+
+ // BUGFIX: fail with a descriptive IOException instead of a
+ // NullPointerException when the plan unexpectedly has no leaf block.
+ if (leafBlock == null) {
+ throw new IOException("No leaf execution block found for query " + queryId);
+ }
+
+ taskContext = new TaskAttemptContext(queryContext, null,
+ new TaskAttemptId(new TaskId(leafBlock.getId(), 0), 0),
+ null, null);
+ physicalExec = new SimplePhysicalPlannerImpl(masterContext.getConf())
+ .createPlan(taskContext, leafBlock.getPlan());
+
+ // The scanner presents the executor's output schema as a transient table.
+ tableDesc = new TableDesc("table_"+System.currentTimeMillis(), physicalExec.getSchema(),
+ new TableMeta(StoreType.SYSTEM, new KeyValueSet()), null);
+ outSchema = physicalExec.getSchema();
+ encoder = RowStoreUtil.createEncoder(getLogicalSchema());
+
+ physicalExec.init();
+ }
+
+ // Releases all scan state. Errors from closing the physical executor are
+ // deliberately ignored (best-effort cleanup); currentRow = -1 marks the
+ // scanner as closed.
+ @Override
+ public void close() throws Exception {
+ tableDesc = null;
+ outSchema = null;
+ encoder = null;
+ if (physicalExec != null) {
+ try {
+ physicalExec.close();
+ } catch (Exception ignored) {}
+ }
+ physicalExec = null;
+ currentRow = -1;
+ }
+
+ // Materializes catalog tablespaces as tuples shaped by outSchema, matching
+ // output columns to proto fields by (case-insensitive) column name.
+ // Optional proto fields map to NULL datums when unset.
+ private List<Tuple> getTablespaces(Schema outSchema) {
+ List<TablespaceProto> tablespaces = masterContext.getCatalog().getAllTablespaces();
+ List<Tuple> tuples = new ArrayList<Tuple>(tablespaces.size());
+ List<Column> columns = outSchema.getColumns();
+ Tuple aTuple;
+
+ for (TablespaceProto tablespace: tablespaces) {
+ aTuple = new VTuple(outSchema.size());
+
+ for (int fieldId = 0; fieldId < columns.size(); fieldId++) {
+ Column column = columns.get(fieldId);
+ if ("space_id".equalsIgnoreCase(column.getSimpleName())) {
+ if (tablespace.hasId()) {
+ aTuple.put(fieldId, DatumFactory.createInt4(tablespace.getId()));
+ } else {
+ aTuple.put(fieldId, DatumFactory.createNullDatum());
+ }
+ } else if ("space_name".equalsIgnoreCase(column.getSimpleName())) {
+ aTuple.put(fieldId, DatumFactory.createText(tablespace.getSpaceName()));
+ } else if ("space_handler".equalsIgnoreCase(column.getSimpleName())) {
+ if (tablespace.hasHandler()) {
+ aTuple.put(fieldId, DatumFactory.createText(tablespace.getHandler()));
+ } else {
+ aTuple.put(fieldId, DatumFactory.createNullDatum());
+ }
+ } else if ("space_uri".equalsIgnoreCase(column.getSimpleName())) {
+ aTuple.put(fieldId, DatumFactory.createText(tablespace.getUri()));
+ }
+ }
+ tuples.add(aTuple);
+ }
+
+ return tuples;
+ }
+
+ // Materializes catalog databases as tuples shaped by outSchema; columns are
+ // matched by name, and the optional space_id maps to NULL when unset.
+ private List<Tuple> getDatabases(Schema outSchema) {
+ List<DatabaseProto> databases = masterContext.getCatalog().getAllDatabases();
+ List<Tuple> tuples = new ArrayList<Tuple>(databases.size());
+ List<Column> columns = outSchema.getColumns();
+ Tuple aTuple;
+
+ for (DatabaseProto database: databases) {
+ aTuple = new VTuple(outSchema.size());
+
+ for (int fieldId = 0; fieldId < columns.size(); fieldId++) {
+ Column column = columns.get(fieldId);
+ if ("db_id".equalsIgnoreCase(column.getSimpleName())) {
+ aTuple.put(fieldId, DatumFactory.createInt4(database.getId()));
+ } else if ("db_name".equalsIgnoreCase(column.getSimpleName())) {
+ aTuple.put(fieldId, DatumFactory.createText(database.getName()));
+ } else if ("space_id".equalsIgnoreCase(column.getSimpleName())) {
+ if (database.hasSpaceId()) {
+ aTuple.put(fieldId, DatumFactory.createInt4(database.getSpaceId()));
+ } else {
+ aTuple.put(fieldId, DatumFactory.createNullDatum());
+ }
+ }
+ }
+
+ tuples.add(aTuple);
+ }
+
+ return tuples;
+ }
+
+ // Materializes catalog table descriptors as tuples shaped by outSchema;
+ // columns are matched by name, optional table_type maps to NULL when unset.
+ private List<Tuple> getTables(Schema outSchema) {
+ List<TableDescriptorProto> tables = masterContext.getCatalog().getAllTables();
+ List<Tuple> tuples = new ArrayList<Tuple>(tables.size());
+ List<Column> columns = outSchema.getColumns();
+ Tuple aTuple;
+
+ for (TableDescriptorProto table: tables) {
+ aTuple = new VTuple(outSchema.size());
+
+ for (int fieldId = 0; fieldId < columns.size(); fieldId++) {
+ Column column = columns.get(fieldId);
+ if ("tid".equalsIgnoreCase(column.getSimpleName())) {
+ aTuple.put(fieldId, DatumFactory.createInt4(table.getTid()));
+ } else if ("db_id".equalsIgnoreCase(column.getSimpleName())) {
+ aTuple.put(fieldId, DatumFactory.createInt4(table.getDbId()));
+ } else if ("table_name".equalsIgnoreCase(column.getSimpleName())) {
+ aTuple.put(fieldId, DatumFactory.createText(table.getName()));
+ } else if ("table_type".equalsIgnoreCase(column.getSimpleName())) {
+ if (table.hasTableType()) {
+ aTuple.put(fieldId, DatumFactory.createText(table.getTableType()));
+ } else {
+ aTuple.put(fieldId, DatumFactory.createNullDatum());
+ }
+ } else if ("path".equalsIgnoreCase(column.getSimpleName())) {
+ aTuple.put(fieldId, DatumFactory.createText(table.getPath()));
+ } else if ("store_type".equalsIgnoreCase(column.getSimpleName())) {
+ aTuple.put(fieldId, DatumFactory.createText(table.getStoreType()));
+ }
+ }
+
+ tuples.add(aTuple);
+ }
+
+ return tuples;
+ }
+
+ // Materializes catalog columns as tuples shaped by outSchema. The 1-based
+ // ordinal_position restarts whenever the table id changes — this assumes
+ // getAllColumns() returns columns grouped by table (true for the stores in
+ // this patch, which emit per-table runs; verify for other CatalogStores).
+ private List<Tuple> getColumns(Schema outSchema) {
+ List<ColumnProto> columnsList = masterContext.getCatalog().getAllColumns();
+ List<Tuple> tuples = new ArrayList<Tuple>(columnsList.size());
+ List<Column> columns = outSchema.getColumns();
+ Tuple aTuple;
+ int columnId = 1, prevtid = -1, tid = 0;
+
+ for (ColumnProto column: columnsList) {
+ aTuple = new VTuple(outSchema.size());
+
+ tid = column.getTid();
+ if (prevtid != tid) {
+ // New table run: reset the ordinal counter.
+ columnId = 1;
+ prevtid = tid;
+ }
+
+ for (int fieldId = 0; fieldId < columns.size(); fieldId++) {
+ Column colObj = columns.get(fieldId);
+
+ if ("tid".equalsIgnoreCase(colObj.getSimpleName())) {
+ if (column.hasTid()) {
+ aTuple.put(fieldId, DatumFactory.createInt4(tid));
+ } else {
+ aTuple.put(fieldId, DatumFactory.createNullDatum());
+ }
+ } else if ("column_name".equalsIgnoreCase(colObj.getSimpleName())) {
+ aTuple.put(fieldId, DatumFactory.createText(column.getName()));
+ } else if ("ordinal_position".equalsIgnoreCase(colObj.getSimpleName())) {
+ aTuple.put(fieldId, DatumFactory.createInt4(columnId));
+ } else if ("data_type".equalsIgnoreCase(colObj.getSimpleName())) {
+ aTuple.put(fieldId, DatumFactory.createText(column.getDataType().getType().toString()));
+ } else if ("type_length".equalsIgnoreCase(colObj.getSimpleName())) {
+ DataType dataType = column.getDataType();
+ if (dataType.hasLength()) {
+ aTuple.put(fieldId, DatumFactory.createInt4(dataType.getLength()));
+ } else {
+ aTuple.put(fieldId, DatumFactory.createNullDatum());
+ }
+ }
+ }
+
+ columnId++;
+ tuples.add(aTuple);
+ }
+
+ return tuples;
+ }
+
+ // Materializes catalog indexes as tuples shaped by outSchema; columns are
+ // matched by name.
+ // NOTE(review): this reads index.getDbId()/getTId(), but the MemStore
+ // producer in this patch never sets those fields — confirm the proto's
+ // defaults are acceptable or that other stores populate them.
+ private List<Tuple> getIndexes(Schema outSchema) {
+ List<IndexProto> indexList = masterContext.getCatalog().getAllIndexes();
+ List<Tuple> tuples = new ArrayList<Tuple>(indexList.size());
+ List<Column> columns = outSchema.getColumns();
+ Tuple aTuple;
+
+ for (IndexProto index: indexList) {
+ aTuple = new VTuple(outSchema.size());
+
+ for (int fieldId = 0; fieldId < columns.size(); fieldId++) {
+ Column column = columns.get(fieldId);
+
+ if ("db_id".equalsIgnoreCase(column.getSimpleName())) {
+ aTuple.put(fieldId, DatumFactory.createInt4(index.getDbId()));
+ } else if ("tid".equalsIgnoreCase(column.getSimpleName())) {
+ aTuple.put(fieldId, DatumFactory.createInt4(index.getTId()));
+ } else if ("index_name".equalsIgnoreCase(column.getSimpleName())) {
+ aTuple.put(fieldId, DatumFactory.createText(index.getIndexName()));
+ } else if ("column_name".equalsIgnoreCase(column.getSimpleName())) {
+ aTuple.put(fieldId, DatumFactory.createText(index.getColumnName()));
+ } else if ("data_type".equalsIgnoreCase(column.getSimpleName())) {
+ aTuple.put(fieldId, DatumFactory.createText(index.getDataType()));
+ } else if ("index_type".equalsIgnoreCase(column.getSimpleName())) {
+ aTuple.put(fieldId, DatumFactory.createText(index.getIndexType()));
+ } else if ("is_unique".equalsIgnoreCase(column.getSimpleName())) {
+ aTuple.put(fieldId, DatumFactory.createBool(index.getIsUnique()));
+ } else if ("is_clustered".equalsIgnoreCase(column.getSimpleName())) {
+ aTuple.put(fieldId, DatumFactory.createBool(index.getIsClustered()));
+ } else if ("is_ascending".equalsIgnoreCase(column.getSimpleName())) {
+ aTuple.put(fieldId, DatumFactory.createBool(index.getIsAscending()));
+ }
+ }
+
+ tuples.add(aTuple);
+ }
+
+ return tuples;
+ }
+
+ /**
+ * Builds one tuple per table option (tid, key, value) from the catalog,
+ * projecting only the columns named by {@code outSchema}.
+ */
+ private List<Tuple> getAllTableOptions(Schema outSchema) {
+ List<TableOptionProto> optionList = masterContext.getCatalog().getAllTableOptions();
+ List<Tuple> tuples = new ArrayList<Tuple>(optionList.size());
+ List<Column> columns = outSchema.getColumns();
+ Tuple aTuple;
+
+ for (TableOptionProto option: optionList) {
+ aTuple = new VTuple(outSchema.size());
+
+ for (int fieldId = 0; fieldId < columns.size(); fieldId++) {
+ Column column = columns.get(fieldId);
+
+ if ("tid".equalsIgnoreCase(column.getSimpleName())) {
+ aTuple.put(fieldId, DatumFactory.createInt4(option.getTid()));
+ // NOTE(review): "key_"/"value_" carry a trailing underscore — presumably
+ // matching the catalog store's column names; confirm against the schema DDL.
+ } else if ("key_".equalsIgnoreCase(column.getSimpleName())) {
+ aTuple.put(fieldId, DatumFactory.createText(option.getKeyval().getKey()));
+ } else if ("value_".equalsIgnoreCase(column.getSimpleName())) {
+ aTuple.put(fieldId, DatumFactory.createText(option.getKeyval().getValue()));
+ }
+ }
+
+ tuples.add(aTuple);
+ }
+
+ return tuples;
+ }
+
+ /**
+ * Builds one tuple per table-stats entry (tid, num_rows, num_bytes) from the
+ * catalog, projecting only the columns named by {@code outSchema}.
+ */
+ private List<Tuple> getAllTableStats(Schema outSchema) {
+ List<TableStatsProto> statList = masterContext.getCatalog().getAllTableStats();
+ List<Tuple> tuples = new ArrayList<Tuple>(statList.size());
+ List<Column> columns = outSchema.getColumns();
+ Tuple aTuple;
+
+ for (TableStatsProto stat: statList) {
+ aTuple = new VTuple(outSchema.size());
+
+ for (int fieldId = 0; fieldId < columns.size(); fieldId++) {
+ Column column = columns.get(fieldId);
+
+ if ("tid".equalsIgnoreCase(column.getSimpleName())) {
+ aTuple.put(fieldId, DatumFactory.createInt4(stat.getTid()));
+ } else if ("num_rows".equalsIgnoreCase(column.getSimpleName())) {
+ aTuple.put(fieldId, DatumFactory.createInt8(stat.getNumRows()));
+ } else if ("num_bytes".equalsIgnoreCase(column.getSimpleName())) {
+ aTuple.put(fieldId, DatumFactory.createInt8(stat.getNumBytes()));
+ }
+ }
+
+ tuples.add(aTuple);
+ }
+
+ return tuples;
+ }
+
+ /**
+ * Builds one tuple per table partition from the catalog, projecting only the
+ * columns named by {@code outSchema}. {@code partition_name} is nullable and
+ * is emitted as a null datum when absent from the proto.
+ */
+ private List<Tuple> getAllPartitions(Schema outSchema) {
+ List<TablePartitionProto> partitionList = masterContext.getCatalog().getAllPartitions();
+ List<Tuple> tuples = new ArrayList<Tuple>(partitionList.size());
+ List<Column> columns = outSchema.getColumns();
+ Tuple aTuple;
+
+ for (TablePartitionProto partition: partitionList) {
+ aTuple = new VTuple(outSchema.size());
+
+ for (int fieldId = 0; fieldId < columns.size(); fieldId++) {
+ Column column = columns.get(fieldId);
+
+ if ("pid".equalsIgnoreCase(column.getSimpleName())) {
+ aTuple.put(fieldId, DatumFactory.createInt4(partition.getPid()));
+ } else if ("tid".equalsIgnoreCase(column.getSimpleName())) {
+ aTuple.put(fieldId, DatumFactory.createInt4(partition.getTid()));
+ } else if ("partition_name".equalsIgnoreCase(column.getSimpleName())) {
+ // Optional proto field: emit SQL NULL when no partition name was recorded.
+ if (partition.hasPartitionName()) {
+ aTuple.put(fieldId, DatumFactory.createText(partition.getPartitionName()));
+ } else {
+ aTuple.put(fieldId, DatumFactory.createNullDatum());
+ }
+ } else if ("ordinal_position".equalsIgnoreCase(column.getSimpleName())) {
+ aTuple.put(fieldId, DatumFactory.createInt4(partition.getOrdinalPosition()));
+ } else if ("path".equalsIgnoreCase(column.getSimpleName())) {
+ aTuple.put(fieldId, DatumFactory.createText(partition.getPath()));
+ }
+ }
+
+ tuples.add(aTuple);
+ }
+
+ return tuples;
+ }
+
+ /**
+ * Dispatches a system (INFORMATION_SCHEMA) table name to the matching
+ * tuple-producing method.
+ *
+ * NOTE(review): returns {@code null} for an unrecognized table name — callers
+ * (e.g. rescan()) must tolerate that, or this should throw instead.
+ */
+ private List<Tuple> fetchSystemTable(TableDesc tableDesc, Schema inSchema) {
+ List<Tuple> tuples = null;
+ String tableName = CatalogUtil.extractSimpleName(tableDesc.getName());
+
+ if ("tablespace".equalsIgnoreCase(tableName)) {
+ tuples = getTablespaces(inSchema);
+ } else if ("databases".equalsIgnoreCase(tableName)) {
+ tuples = getDatabases(inSchema);
+ } else if ("tables".equalsIgnoreCase(tableName)) {
+ tuples = getTables(inSchema);
+ } else if ("columns".equalsIgnoreCase(tableName)) {
+ tuples = getColumns(inSchema);
+ } else if ("indexes".equalsIgnoreCase(tableName)) {
+ tuples = getIndexes(inSchema);
+ } else if ("table_options".equalsIgnoreCase(tableName)) {
+ tuples = getAllTableOptions(inSchema);
+ } else if ("table_stats".equalsIgnoreCase(tableName)) {
+ tuples = getAllTableStats(inSchema);
+ } else if ("partitions".equalsIgnoreCase(tableName)) {
+ tuples = getAllPartitions(inSchema);
+ }
+
+ return tuples;
+ }
+
+ /**
+ * Pulls up to {@code fetchRowNum} tuples from the physical executor, encodes
+ * each as a serialized row, and advances {@code currentRow}. The executor is
+ * closed (and nulled) once it is exhausted or {@code maxRow} is reached, so a
+ * subsequent call returns an empty list.
+ */
+ @Override
+ public List<ByteString> getNextRows(int fetchRowNum) throws IOException {
+ List<ByteString> rows = new ArrayList<ByteString>();
+ int startRow = currentRow;
+ int endRow = startRow + fetchRowNum;
+
+ // Already finished (or never initialized): nothing left to produce.
+ if (physicalExec == null) {
+ return rows;
+ }
+
+ while (currentRow < endRow) {
+ Tuple currentTuple = physicalExec.next();
+
+ if (currentTuple == null) {
+ physicalExec.close();
+ physicalExec = null;
+ break;
+ }
+
+ currentRow++;
+ rows.add(ByteString.copyFrom(encoder.toBytes(currentTuple)));
+
+ // Honor the LIMIT-derived cap across calls, not just within one call.
+ if (currentRow >= maxRow) {
+ physicalExec.close();
+ physicalExec = null;
+ break;
+ }
+ }
+
+ return rows;
+ }
+
+ // --- Simple accessors required by the NonForwardQueryResultScanner contract. ---
+
+ @Override
+ public QueryId getQueryId() {
+ return queryId;
+ }
+
+ @Override
+ public String getSessionId() {
+ return sessionId;
+ }
+
+ @Override
+ public TableDesc getTableDesc() {
+ return tableDesc;
+ }
+
+ // Note: the logical schema served to clients is the executor's output schema.
+ @Override
+ public Schema getLogicalSchema() {
+ return outSchema;
+ }
+
+ /**
+ * Physical planner variant that forces every scan (plain or index) over a
+ * system table to run through {@link SystemPhysicalExec}, which reads from
+ * the catalog instead of storage.
+ */
+ class SimplePhysicalPlannerImpl extends PhysicalPlannerImpl {
+
+ public SimplePhysicalPlannerImpl(TajoConf conf) {
+ super(conf);
+ }
+
+ @Override
+ public PhysicalExec createScanPlan(TaskAttemptContext ctx, ScanNode scanNode, Stack<LogicalNode> node)
+ throws IOException {
+ return new SystemPhysicalExec(ctx, scanNode);
+ }
+
+ @Override
+ public PhysicalExec createIndexScanExec(TaskAttemptContext ctx, IndexScanNode annotation) throws IOException {
+ return new SystemPhysicalExec(ctx, annotation);
+ }
+ }
+
+ /**
+ * In-memory physical executor for system tables: materializes all rows from
+ * the catalog on first use (lazy {@link #rescan()}), then serves them one at
+ * a time with optional qual filtering and projection.
+ */
+ class SystemPhysicalExec extends PhysicalExec {
+
+ private ScanNode scanNode;
+ private EvalNode qual;            // filter predicate; null/absent when the scan has no WHERE
+ private Projector projector;
+ private TableStats tableStats;
+ private final List<Tuple> cachedData;  // full system-table contents, filled by rescan()
+ private int currentRow;
+ private boolean isClosed;
+
+ public SystemPhysicalExec(TaskAttemptContext context, ScanNode scanNode) {
+ super(context, scanNode.getInSchema(), scanNode.getOutSchema());
+ this.scanNode = scanNode;
+ this.qual = this.scanNode.getQual();
+ cachedData = TUtil.newList();
+ currentRow = 0;
+ isClosed = false;
+
+ projector = new Projector(context, inSchema, outSchema, scanNode.getTargets());
+ }
+
+ /**
+ * Returns the next (projected, optionally filtered) tuple, or null when
+ * exhausted or after close(). Populates the cache lazily on first call.
+ */
+ @Override
+ public Tuple next() throws IOException {
+ Tuple aTuple = null;
+ Tuple outTuple = new VTuple(outColumnNum);
+
+ if (isClosed) {
+ return null;
+ }
+
+ // Lazy load: fetch the whole system table into memory on first access.
+ if (cachedData.size() == 0) {
+ rescan();
+ }
+
+ if (!scanNode.hasQual()) {
+ if (currentRow < cachedData.size()) {
+ aTuple = cachedData.get(currentRow++);
+ projector.eval(aTuple, outTuple);
+ outTuple.setOffset(aTuple.getOffset());
+ return outTuple;
+ }
+ return null;
+ } else {
+ // Skip rows failing the qual until a match or end of cache.
+ // NOTE(review): offset is only propagated in the no-qual branch above —
+ // confirm whether the filtered path should also call setOffset().
+ while (currentRow < cachedData.size()) {
+ aTuple = cachedData.get(currentRow++);
+ if (qual.eval(inSchema, aTuple).isTrue()) {
+ projector.eval(aTuple, outTuple);
+ return outTuple;
+ }
+ }
+ return null;
+ }
+ }
+
+ /** Re-reads the system table from the catalog and resets row statistics. */
+ @Override
+ public void rescan() throws IOException {
+ cachedData.clear();
+ cachedData.addAll(fetchSystemTable(scanNode.getTableDesc(), inSchema));
+
+ tableStats = new TableStats();
+ tableStats.setNumRows(cachedData.size());
+ }
+
+ /** Releases references and marks the executor closed; next() then yields null. */
+ @Override
+ public void close() throws IOException {
+ scanNode = null;
+ qual = null;
+ projector = null;
+ cachedData.clear();
+ currentRow = -1;
+ isClosed = true;
+ }
+
+ // All data is materialized up front, so progress is always complete.
+ @Override
+ public float getProgress() {
+ return 1.0f;
+ }
+
+ // Swap the qual for its precompiled form when the scan carries a predicate.
+ @Override
+ protected void compile() throws CompilationError {
+ if (scanNode.hasQual()) {
+ qual = context.getPrecompiledEval(inSchema, qual);
+ }
+ }
+
+ @Override
+ public TableStats getInputStats() {
+ return tableStats;
+ }
+
+ }
+
+}
http://git-wip-us.apache.org/repos/asf/tajo/blob/021a6f0b/tajo-core/src/main/java/org/apache/tajo/master/TajoMasterClientService.java
----------------------------------------------------------------------
diff --git a/tajo-core/src/main/java/org/apache/tajo/master/TajoMasterClientService.java b/tajo-core/src/main/java/org/apache/tajo/master/TajoMasterClientService.java
index ee99353..c413b65 100644
--- a/tajo-core/src/main/java/org/apache/tajo/master/TajoMasterClientService.java
+++ b/tajo-core/src/main/java/org/apache/tajo/master/TajoMasterClientService.java
@@ -525,7 +525,7 @@ public class TajoMasterClientService extends AbstractService {
List<ByteString> rows = queryResultScanner.getNextRows(request.getFetchRowNum());
SerializedResultSet.Builder resultSetBuilder = SerializedResultSet.newBuilder();
- resultSetBuilder.setSchema(queryResultScanner.getTableDesc().getLogicalSchema().getProto());
+ resultSetBuilder.setSchema(queryResultScanner.getLogicalSchema().getProto());
resultSetBuilder.addAllSerializedTuples(rows);
builder.setResultSet(resultSetBuilder.build());
http://git-wip-us.apache.org/repos/asf/tajo/blob/021a6f0b/tajo-core/src/main/java/org/apache/tajo/master/exec/QueryExecutor.java
----------------------------------------------------------------------
diff --git a/tajo-core/src/main/java/org/apache/tajo/master/exec/QueryExecutor.java b/tajo-core/src/main/java/org/apache/tajo/master/exec/QueryExecutor.java
index 10701f9..2242445 100644
--- a/tajo-core/src/main/java/org/apache/tajo/master/exec/QueryExecutor.java
+++ b/tajo-core/src/main/java/org/apache/tajo/master/exec/QueryExecutor.java
@@ -41,7 +41,9 @@ import org.apache.tajo.engine.planner.physical.StoreTableExec;
import org.apache.tajo.engine.query.QueryContext;
import org.apache.tajo.ipc.ClientProtos;
import org.apache.tajo.ipc.ClientProtos.SubmitQueryResponse;
+import org.apache.tajo.master.NonForwardQueryResultFileScanner;
import org.apache.tajo.master.NonForwardQueryResultScanner;
+import org.apache.tajo.master.NonForwardQueryResultSystemScanner;
import org.apache.tajo.master.TajoMaster;
import org.apache.tajo.master.exec.prehook.CreateTableHook;
import org.apache.tajo.master.exec.prehook.DistributedQueryHookManager;
@@ -104,6 +106,8 @@ public class QueryExecutor {
} else if (plan.isExplain()) { // explain query
execExplain(plan, response);
+ } else if (PlannerUtil.checkIfQueryTargetIsVirtualTable(plan)) {
+ execQueryOnVirtualTable(queryContext, session, sql, plan, response);
// Simple query indicates a form of 'select * from tb_name [LIMIT X];'.
} else if (PlannerUtil.checkIfSimpleQuery(plan)) {
@@ -183,6 +187,27 @@ public class QueryExecutor {
response.setQueryId(QueryIdFactory.NULL_QUERY_ID.getProto());
}
+ /**
+ * Serves a query whose every scan targets a SYSTEM-store virtual table by
+ * running it on the master through a NonForwardQueryResultSystemScanner,
+ * rather than launching a distributed query. A LIMIT in the root block caps
+ * the row count; otherwise all rows may be fetched.
+ */
+ public void execQueryOnVirtualTable(QueryContext queryContext, Session session, String query, LogicalPlan plan,
+ SubmitQueryResponse.Builder response) throws Exception {
+ int maxRow = Integer.MAX_VALUE;
+ if (plan.getRootBlock().hasNode(NodeType.LIMIT)) {
+ LimitNode limitNode = plan.getRootBlock().getNode(NodeType.LIMIT);
+ maxRow = (int) limitNode.getFetchFirstNum();
+ }
+ QueryId queryId = QueryIdFactory.newQueryId(context.getResourceManager().getSeedQueryId());
+
+ NonForwardQueryResultScanner queryResultScanner =
+ new NonForwardQueryResultSystemScanner(context, plan, queryId, session.getSessionId(), maxRow);
+
+ queryResultScanner.init();
+ // Register with the session so the client can page results via fetch calls.
+ session.addNonForwardQueryResultScanner(queryResultScanner);
+
+ response.setQueryId(queryId.getProto());
+ response.setMaxRowNum(maxRow);
+ response.setTableDesc(queryResultScanner.getTableDesc().getProto());
+ response.setResultCode(ClientProtos.ResultCode.OK);
+ }
+
public void execSimpleQuery(QueryContext queryContext, Session session, String query, LogicalPlan plan,
SubmitQueryResponse.Builder response) throws Exception {
ScanNode scanNode = plan.getRootBlock().getNode(NodeType.SCAN);
@@ -202,7 +227,7 @@ public class QueryExecutor {
QueryId queryId = QueryIdFactory.newQueryId(context.getResourceManager().getSeedQueryId());
NonForwardQueryResultScanner queryResultScanner =
- new NonForwardQueryResultScanner(context.getConf(), session.getSessionId(), queryId, scanNode, desc, maxRow);
+ new NonForwardQueryResultFileScanner(context.getConf(), session.getSessionId(), queryId, scanNode, desc, maxRow);
queryResultScanner.init();
session.addNonForwardQueryResultScanner(queryResultScanner);
http://git-wip-us.apache.org/repos/asf/tajo/blob/021a6f0b/tajo-core/src/test/java/org/apache/tajo/master/TestNonForwardQueryResultSystemScanner.java
----------------------------------------------------------------------
diff --git a/tajo-core/src/test/java/org/apache/tajo/master/TestNonForwardQueryResultSystemScanner.java b/tajo-core/src/test/java/org/apache/tajo/master/TestNonForwardQueryResultSystemScanner.java
new file mode 100644
index 0000000..bdd6dfc
--- /dev/null
+++ b/tajo-core/src/test/java/org/apache/tajo/master/TestNonForwardQueryResultSystemScanner.java
@@ -0,0 +1,296 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.tajo.master;
+
+import static org.junit.Assert.*;
+import static org.hamcrest.CoreMatchers.*;
+
+import java.io.File;
+import java.util.ArrayList;
+import java.util.Iterator;
+import java.util.List;
+import java.util.UUID;
+
+import org.apache.tajo.LocalTajoTestingUtility;
+import org.apache.tajo.QueryId;
+import org.apache.tajo.QueryIdFactory;
+import org.apache.tajo.TajoTestingCluster;
+import org.apache.tajo.algebra.Expr;
+import org.apache.tajo.benchmark.TPCH;
+import org.apache.tajo.catalog.Schema;
+import org.apache.tajo.common.TajoDataTypes.Type;
+import org.apache.tajo.conf.TajoConf;
+import org.apache.tajo.datum.Datum;
+import org.apache.tajo.engine.parser.SQLAnalyzer;
+import org.apache.tajo.engine.query.QueryContext;
+import org.apache.tajo.master.TajoMaster.MasterContext;
+import org.apache.tajo.plan.LogicalOptimizer;
+import org.apache.tajo.plan.LogicalPlan;
+import org.apache.tajo.plan.LogicalPlanner;
+import org.apache.tajo.plan.logical.LimitNode;
+import org.apache.tajo.plan.logical.NodeType;
+import org.apache.tajo.storage.RowStoreUtil;
+import org.apache.tajo.storage.StorageConstants;
+import org.apache.tajo.storage.RowStoreUtil.RowStoreDecoder;
+import org.apache.tajo.storage.Tuple;
+import org.apache.tajo.util.KeyValueSet;
+import org.hamcrest.Description;
+import org.hamcrest.Matcher;
+import org.hamcrest.TypeSafeDiagnosingMatcher;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import com.google.protobuf.ByteString;
+
+public class TestNonForwardQueryResultSystemScanner {
+
+ /**
+ * Hamcrest matcher asserting that at least one element of an Iterable
+ * satisfies the wrapped matcher; on failure, accumulates a comma-separated
+ * mismatch description over the visited elements.
+ */
+ private class CollectionMatcher<T> extends TypeSafeDiagnosingMatcher<Iterable<? extends T>> {
+
+ private final Matcher<? extends T> matcher;
+
+ public CollectionMatcher(Matcher<? extends T> matcher) {
+ this.matcher = matcher;
+ }
+
+ @Override
+ public void describeTo(Description description) {
+ description.appendText("a collection containing ").appendDescriptionOf(this.matcher);
+ }
+
+ @Override
+ protected boolean matchesSafely(Iterable<? extends T> item, Description mismatchDescription) {
+ boolean isFirst = true;
+ Iterator<? extends T> iterator = item.iterator();
+
+ while (iterator.hasNext()) {
+ T obj = iterator.next();
+ // Short-circuit on the first matching element.
+ if (this.matcher.matches(obj)) {
+ return true;
+ }
+
+ if (!isFirst) {
+ mismatchDescription.appendText(", ");
+ }
+
+ this.matcher.describeMismatch(obj, mismatchDescription);
+ isFirst = false;
+ }
+ return false;
+ }
+
+ }
+
+ // Local stand-in for Hamcrest's hasItem, backed by CollectionMatcher above.
+ private <T> Matcher<Iterable<? extends T>> hasItem(Matcher<? extends T> matcher) {
+ return new CollectionMatcher<T>(matcher);
+ }
+
+ // Shared cluster fixtures, initialized once in setUp() and torn down in tearDown().
+ private static LocalTajoTestingUtility testUtil;
+ private static TajoTestingCluster testingCluster;
+ private static TajoConf conf;
+ private static MasterContext masterContext;
+
+ // Planning components borrowed from the master's GlobalEngine in setUp().
+ private static SQLAnalyzer analyzer;
+ private static LogicalPlanner logicalPlanner;
+ private static LogicalOptimizer logicalOptimizer;
+
+ /**
+ * Boots a local Tajo cluster pre-loaded with the nine TPC-H tables (including
+ * empty_orders), resolving each .tbl data file relative to either the working
+ * directory or the tajo-core module root.
+ */
+ private static void setupTestingCluster() throws Exception {
+ testUtil = new LocalTajoTestingUtility();
+ String[] names, paths;
+ Schema[] schemas;
+
+ TPCH tpch = new TPCH();
+ tpch.loadSchemas();
+ tpch.loadQueries();
+
+ names = new String[] {"customer", "lineitem", "nation", "orders", "part", "partsupp",
+ "region", "supplier", "empty_orders"};
+ schemas = new Schema[names.length];
+ for (int i = 0; i < names.length; i++) {
+ schemas[i] = tpch.getSchema(names[i]);
+ }
+
+ File file;
+ paths = new String[names.length];
+ for (int i = 0; i < names.length; i++) {
+ file = new File("src/test/tpch/" + names[i] + ".tbl");
+ // Fallback: tests may run from the repository root rather than tajo-core.
+ if(!file.exists()) {
+ file = new File(System.getProperty("user.dir") + "/tajo-core/src/test/tpch/" + names[i]
+ + ".tbl");
+ }
+ paths[i] = file.getAbsolutePath();
+ }
+
+ KeyValueSet opt = new KeyValueSet();
+ opt.set(StorageConstants.TEXT_DELIMITER, StorageConstants.DEFAULT_FIELD_DELIMITER);
+ testUtil.setup(names, paths, schemas, opt);
+
+ testingCluster = testUtil.getTestingCluster();
+ }
+
+ /** Starts the cluster and captures the master's planning components for reuse. */
+ @BeforeClass
+ public static void setUp() throws Exception {
+ setupTestingCluster();
+
+ conf = testingCluster.getConfiguration();
+ masterContext = testingCluster.getMaster().getContext();
+
+ GlobalEngine globalEngine = masterContext.getGlobalEngine();
+ analyzer = globalEngine.getAnalyzer();
+ logicalPlanner = globalEngine.getLogicalPlanner();
+ logicalOptimizer = globalEngine.getLogicalOptimizer();
+ }
+
+ /** Shuts the cluster down after the test class completes. */
+ @AfterClass
+ public static void tearDown() throws Exception {
+ try {
+ // NOTE(review): bare sleep — presumably to let in-flight master work drain
+ // before shutdown; a condition-based wait would be less flaky. TODO confirm.
+ Thread.sleep(2000);
+ } catch (Exception ignored) {
+ }
+
+ testUtil.shutdown();
+ }
+
+ /** Convenience overload: builds a scanner with a fresh query id and session id. */
+ private NonForwardQueryResultScanner getScanner(String sql) throws Exception {
+ QueryId queryId = QueryIdFactory.newQueryId(masterContext.getResourceManager().getSeedQueryId());
+ String sessionId = UUID.randomUUID().toString();
+
+ return getScanner(sql, queryId, sessionId);
+ }
+
+ /**
+ * Parses, plans, and optimizes {@code sql}, derives maxRow from any LIMIT in
+ * the root block, and wraps the plan in a (not yet initialized)
+ * NonForwardQueryResultSystemScanner.
+ */
+ private NonForwardQueryResultScanner getScanner(String sql, QueryId queryId, String sessionId) throws Exception {
+ QueryContext queryContext = LocalTajoTestingUtility.createDummyContext(conf);
+
+ Expr expr = analyzer.parse(sql);
+ LogicalPlan logicalPlan = logicalPlanner.createPlan(queryContext, expr);
+ logicalOptimizer.optimize(logicalPlan);
+
+ int maxRow = Integer.MAX_VALUE;
+ if (logicalPlan.getRootBlock().hasNode(NodeType.LIMIT)) {
+ LimitNode limitNode = logicalPlan.getRootBlock().getNode(NodeType.LIMIT);
+ maxRow = (int) limitNode.getFetchFirstNum();
+ }
+
+ NonForwardQueryResultScanner queryResultScanner =
+ new NonForwardQueryResultSystemScanner(masterContext, logicalPlan, queryId,
+ sessionId, maxRow);
+
+ return queryResultScanner;
+ }
+
+ /** Verifies that init() populates ids, table descriptor, and the projected schema. */
+ @Test
+ public void testInit() throws Exception {
+ QueryId queryId = QueryIdFactory.newQueryId(masterContext.getResourceManager().getSeedQueryId());
+ String sessionId = UUID.randomUUID().toString();
+ NonForwardQueryResultScanner queryResultScanner =
+ getScanner("SELECT SPACE_ID, SPACE_URI FROM INFORMATION_SCHEMA.TABLESPACE",
+ queryId, sessionId);
+
+ queryResultScanner.init();
+
+ assertThat(queryResultScanner.getQueryId(), is(notNullValue()));
+ assertThat(queryResultScanner.getLogicalSchema(), is(notNullValue()));
+ assertThat(queryResultScanner.getSessionId(), is(notNullValue()));
+ assertThat(queryResultScanner.getTableDesc(), is(notNullValue()));
+
+ // The scanner must echo back the ids it was constructed with.
+ assertThat(queryResultScanner.getQueryId(), is(queryId));
+ assertThat(queryResultScanner.getSessionId(), is(sessionId));
+
+ // Two projected columns were requested, so the logical schema has size 2.
+ assertThat(queryResultScanner.getLogicalSchema().size(), is(2));
+ assertThat(queryResultScanner.getLogicalSchema().getColumn("space_id"), is(notNullValue()));
+ }
+
+ /** Decodes each serialized row back into a Tuple, preserving order. */
+ private List<Tuple> getTupleList(RowStoreDecoder decoder, List<ByteString> bytes) {
+ List<Tuple> tuples = new ArrayList<Tuple>(bytes.size());
+
+ for (ByteString byteString: bytes) {
+ Tuple aTuple = decoder.toTuple(byteString.toByteArray());
+ tuples.add(aTuple);
+ }
+
+ return tuples;
+ }
+
+ /**
+ * Matcher over a single tuple field: unboxes TEXT/INT4/INT8 datums to their
+ * Java values and delegates to {@code matcher}. Other datum types leave
+ * itemValue null and therefore never match.
+ */
+ private <T> Matcher<Tuple> getTupleMatcher(final int fieldId, final Matcher<T> matcher) {
+ return new TypeSafeDiagnosingMatcher<Tuple>() {
+
+ @Override
+ public void describeTo(Description description) {
+ description.appendDescriptionOf(matcher);
+ }
+
+ @Override
+ protected boolean matchesSafely(Tuple item, Description mismatchDescription) {
+ Datum datum = item.get(fieldId);
+ Object itemValue = null;
+
+ if (datum.type() == Type.TEXT) {
+ itemValue = datum.asChars();
+ } else if (datum.type() == Type.INT4) {
+ itemValue = datum.asInt4();
+ } else if (datum.type() == Type.INT8) {
+ itemValue = datum.asInt8();
+ }
+
+ if (itemValue != null && matcher.matches(itemValue)) {
+ return true;
+ }
+
+ matcher.describeMismatch(itemValue, mismatchDescription);
+ return false;
+ }
+ };
+ }
+
+ /**
+ * COUNT(*) over INFORMATION_SCHEMA.TABLES must yield a single row whose value
+ * is 9 — the nine TPC-H tables created in setupTestingCluster().
+ */
+ @Test
+ public void testGetNextRowsForAggregateFunction() throws Exception {
+ NonForwardQueryResultScanner queryResultScanner =
+ getScanner("SELECT COUNT(*) FROM INFORMATION_SCHEMA.TABLES");
+
+ queryResultScanner.init();
+
+ List<ByteString> rowBytes = queryResultScanner.getNextRows(100);
+
+ assertThat(rowBytes.size(), is(1));
+
+ RowStoreDecoder decoder = RowStoreUtil.createDecoder(queryResultScanner.getLogicalSchema());
+ List<Tuple> tuples = getTupleList(decoder, rowBytes);
+
+ assertThat(tuples.size(), is(1));
+ assertThat(tuples, hasItem(getTupleMatcher(0, is(9L))));
+ }
+
+ /**
+ * Scanning INFORMATION_SCHEMA.TABLES must return one row per created table
+ * (the nine TPC-H tables from setupTestingCluster()), including "lineitem".
+ */
+ @Test
+ public void testGetNextRowsForTable() throws Exception {
+ NonForwardQueryResultScanner queryResultScanner =
+ getScanner("SELECT TABLE_NAME, TABLE_TYPE FROM INFORMATION_SCHEMA.TABLES");
+
+ queryResultScanner.init();
+
+ List<ByteString> rowBytes = queryResultScanner.getNextRows(100);
+
+ assertThat(rowBytes.size(), is(9));
+
+ RowStoreDecoder decoder = RowStoreUtil.createDecoder(queryResultScanner.getLogicalSchema());
+ // Fixed: removed a stray empty statement (double semicolon) after this call.
+ List<Tuple> tuples = getTupleList(decoder, rowBytes);
+
+ assertThat(tuples.size(), is(9));
+ assertThat(tuples, hasItem(getTupleMatcher(0, is("lineitem"))));
+ }
+}
http://git-wip-us.apache.org/repos/asf/tajo/blob/021a6f0b/tajo-plan/src/main/java/org/apache/tajo/plan/LogicalPlanner.java
----------------------------------------------------------------------
diff --git a/tajo-plan/src/main/java/org/apache/tajo/plan/LogicalPlanner.java b/tajo-plan/src/main/java/org/apache/tajo/plan/LogicalPlanner.java
index eebee6f..9002f28 100644
--- a/tajo-plan/src/main/java/org/apache/tajo/plan/LogicalPlanner.java
+++ b/tajo-plan/src/main/java/org/apache/tajo/plan/LogicalPlanner.java
@@ -37,6 +37,7 @@ import org.apache.tajo.algebra.WindowSpec;
import org.apache.tajo.catalog.*;
import org.apache.tajo.catalog.partition.PartitionMethodDesc;
import org.apache.tajo.catalog.proto.CatalogProtos;
+import org.apache.tajo.catalog.proto.CatalogProtos.StoreType;
import org.apache.tajo.common.TajoDataTypes;
import org.apache.tajo.datum.NullDatum;
import org.apache.tajo.plan.LogicalPlan.QueryBlock;
@@ -1314,7 +1315,7 @@ public class LogicalPlanner extends BaseAlgebraVisitor<LogicalPlanner.PlanContex
}
private void updatePhysicalInfo(TableDesc desc) {
- if (desc.getPath() != null) {
+ if (desc.getPath() != null && desc.getMeta().getStoreType() != StoreType.SYSTEM) {
try {
Path path = new Path(desc.getPath());
FileSystem fs = path.getFileSystem(new Configuration());
http://git-wip-us.apache.org/repos/asf/tajo/blob/021a6f0b/tajo-plan/src/main/java/org/apache/tajo/plan/util/PlannerUtil.java
----------------------------------------------------------------------
diff --git a/tajo-plan/src/main/java/org/apache/tajo/plan/util/PlannerUtil.java b/tajo-plan/src/main/java/org/apache/tajo/plan/util/PlannerUtil.java
index d813432..0fbd359 100644
--- a/tajo-plan/src/main/java/org/apache/tajo/plan/util/PlannerUtil.java
+++ b/tajo-plan/src/main/java/org/apache/tajo/plan/util/PlannerUtil.java
@@ -139,6 +139,27 @@ public class PlannerUtil {
(simpleOperator && noComplexComputation && isOneQueryBlock &&
noOrderBy && noGroupBy && noWhere && noJoin && singleRelation);
}
+
+ /**
+ * Checks whether the target of this query is a virtual table or not.
+ * It will be removed after tajo storage supports catalog service access.
+ *
+ */
+ public static boolean checkIfQueryTargetIsVirtualTable(LogicalPlan plan) {
+ LogicalRootNode rootNode = plan.getRootBlock().getRoot();
+
+ boolean hasScanNode = plan.getRootBlock().hasNode(NodeType.SCAN);
+ LogicalNode[] scanNodes = findAllNodes(rootNode, NodeType.SCAN);
+ // Starts true only when at least one scan exists; AND-ed below so that a
+ // single non-SYSTEM scan disqualifies the query.
+ boolean isVirtualTable = scanNodes.length > 0;
+ ScanNode scanNode = null;
+
+ for (LogicalNode node: scanNodes) {
+ scanNode = (ScanNode) node;
+ isVirtualTable &= (scanNode.getTableDesc().getMeta().getStoreType() == StoreType.SYSTEM);
+ }
+
+ // DDL plans are excluded even if they reference SYSTEM-store relations.
+ return !checkIfDDLPlan(rootNode) && hasScanNode && isVirtualTable;
+ }
/**
* Checks whether the query has 'from clause' or not.