Posted to commits@hive.apache.org by ha...@apache.org on 2014/06/24 08:32:31 UTC

svn commit: r1605013 [1/3] - in /hive/branches/cbo: ./ common/src/java/org/apache/hadoop/hive/conf/ conf/ ql/ ql/src/java/org/apache/hadoop/hive/ql/ ql/src/java/org/apache/hadoop/hive/ql/optimizer/ ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/...

Author: hashutosh
Date: Tue Jun 24 06:32:30 2014
New Revision: 1605013

URL: http://svn.apache.org/r1605013
Log:
HIVE-5775 : Introduce Cost Based Optimizer to Hive (Initial patch) (John P via Ashutosh Chauhan)

Added:
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/CostBasedOptimizer.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/PreCBOOptimizer.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/HiveDefaultRelMetadataProvider.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/HiveOptiqUtil.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/JoinUtil.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/Pair.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/RelOptHiveTable.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/TraitsUtil.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/cost/
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/cost/HiveCost.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/cost/HiveCostUtil.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/cost/HiveVolcanoPlanner.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/reloperators/
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/reloperators/HiveAggregateRel.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/reloperators/HiveFilterRel.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/reloperators/HiveJoinRel.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/reloperators/HiveLimitRel.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/reloperators/HiveProjectRel.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/reloperators/HiveRel.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/reloperators/HiveSortRel.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/reloperators/HiveTableScanRel.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/rules/
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/rules/HiveMergeProjectRule.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/rules/HivePullUpProjectsAboveJoinRule.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/rules/HivePushJoinThroughJoinRule.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/rules/HiveSwapJoinRule.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/stats/
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/stats/CBOTableStatsValidator.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/stats/FilterSelectivityEstimator.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/stats/HiveRelMdDistinctRowCount.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/stats/HiveRelMdSelectivity.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/translator/
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/translator/ASTBuilder.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/translator/ASTConverter.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/translator/DerivedTableInjector.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/translator/RelNodeConverter.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/translator/RexNodeConverter.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/translator/SqlFunctionConverter.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/translator/TypeConverter.java
Modified:
    hive/branches/cbo/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
    hive/branches/cbo/conf/hive-default.xml.template
    hive/branches/cbo/pom.xml
    hive/branches/cbo/ql/pom.xml
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/QueryProperties.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/parse/ColumnStatsSemanticAnalyzer.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/parse/ParseDriver.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/parse/UnparseTranslator.java

Modified: hive/branches/cbo/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java?rev=1605013&r1=1605012&r2=1605013&view=diff
==============================================================================
--- hive/branches/cbo/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java (original)
+++ hive/branches/cbo/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java Tue Jun 24 06:32:30 2014
@@ -440,6 +440,19 @@ public class HiveConf extends Configurat
     HIVEJOINEMITINTERVAL("hive.join.emit.interval", 1000),
     HIVEJOINCACHESIZE("hive.join.cache.size", 25000),
 
+    // CBO related
+    /*
+     * Flag to control enabling Cost Based Optimizations using Optiq framework.
+     */
+    HIVE_CBO_ENABLED("hive.cbo.enable", false),
+    /*
+     * Control queries that will be considered for join reordering, based on
+     * number of joins in them. Beyond a certain number of joins, the cost of
+     * considering possible permutations is prohibitive.
+     */
+    HIVE_CBO_MAX_JOINS_SUPPORTED("hive.cbo.max.joins.supported", 10),
+    HIVE_CBO_PULLPROJECTABOVEJOIN_RULE("hive.cbo.project.pullabovejoin.rule", false),
+
     // hive.mapjoin.bucket.cache.size has been replaced by hive.smbjoin.cache.row,
     // need to remove by hive .13. Also, do not change default (see SMB operator)
     HIVEMAPJOINBUCKETCACHESIZE("hive.mapjoin.bucket.cache.size", 100),

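For context, a minimal sketch of reading the new flags through the HiveConf API, mirroring how CostBasedOptimizer below consumes them; the queryProperties variable is assumed to exist in the caller:

    HiveConf conf = new HiveConf();
    boolean cboEnabled = HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_CBO_ENABLED);
    int maxJoins = HiveConf.getIntVar(conf, HiveConf.ConfVars.HIVE_CBO_MAX_JOINS_SUPPORTED);
    if (cboEnabled && queryProperties.getJoinCount() < maxJoins) {
      // join reordering may be attempted (see CostBasedOptimizer.canHandleOpTree below)
    }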
Modified: hive/branches/cbo/conf/hive-default.xml.template
URL: http://svn.apache.org/viewvc/hive/branches/cbo/conf/hive-default.xml.template?rev=1605013&r1=1605012&r2=1605013&view=diff
==============================================================================
--- hive/branches/cbo/conf/hive-default.xml.template (original)
+++ hive/branches/cbo/conf/hive-default.xml.template Tue Jun 24 06:32:30 2014
@@ -547,6 +547,23 @@
 </property>
 
 <property>
+  <name>hive.cbo.enable</name>
+  <value>false</value>
+  <description>
+    Flag to control enabling Cost Based Optimizations using Optiq framework.
+  </description>
+</property>
+
+<property>
+  <name>hive.cbo.max.joins.supported</name>
+  <value>10</value>
+  <description>
+    Control queries that will be considered for join reordering, based on number of joins in them.
+    Beyond a certain number of joins, the cost of considering possible permutations is prohibitive.
+  </description>
+</property>
+
+<property>
   <name>hive.mapred.supports.subdirectories</name>
   <value>false</value>
   <description>Whether the version of Hadoop which is running supports sub-directories for tables/partitions.

Modified: hive/branches/cbo/pom.xml
URL: http://svn.apache.org/viewvc/hive/branches/cbo/pom.xml?rev=1605013&r1=1605012&r2=1605013&view=diff
==============================================================================
--- hive/branches/cbo/pom.xml (original)
+++ hive/branches/cbo/pom.xml Tue Jun 24 06:32:30 2014
@@ -199,6 +199,17 @@
          <enabled>false</enabled>
        </snapshots>
      </repository>
+     <repository>
+       <id>conjars</id>
+       <name>Optiq Conjars repository</name>
+       <url>http://conjars.org/repo</url>
+       <layout>default</layout>
+       <releases>
+          <enabled>true</enabled>
+          <updatePolicy>always</updatePolicy>
+          <checksumPolicy>warn</checksumPolicy>
+       </releases>
+     </repository>
   </repositories>
 
   <!-- Hadoop dependency management is done at the bottom under profiles -->

Modified: hive/branches/cbo/ql/pom.xml
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/pom.xml?rev=1605013&r1=1605012&r2=1605013&view=diff
==============================================================================
--- hive/branches/cbo/ql/pom.xml (original)
+++ hive/branches/cbo/ql/pom.xml Tue Jun 24 06:32:30 2014
@@ -28,6 +28,7 @@
   <name>Hive Query Language</name>
 
   <properties>
+    <optiq.version>0.5</optiq.version>
     <hive.path.to.root>..</hive.path.to.root>
   </properties>
 
@@ -182,6 +183,24 @@
       <version>${datanucleus-core.version}</version>
     </dependency>
     <dependency>
+      <groupId>net.hydromatic</groupId>
+      <artifactId>optiq-core</artifactId>
+      <version>${optiq.version}</version>
+      <exclusions>
+        <!-- hsqldb interferes with the use of derby as the default db
+          in hive's use of datanucleus. 
+        -->
+        <exclusion>
+          <groupId>org.hsqldb</groupId>
+          <artifactId>hsqldb</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>com.fasterxml.jackson.core</groupId>
+          <artifactId>jackson-databind</artifactId>
+        </exclusion>
+      </exclusions>
+    </dependency>   
+    <dependency>
       <groupId>com.google.guava</groupId>
       <artifactId>guava</artifactId>
       <version>${guava.version}</version>

Modified: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/QueryProperties.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/QueryProperties.java?rev=1605013&r1=1605012&r2=1605013&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/QueryProperties.java (original)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/QueryProperties.java Tue Jun 24 06:32:30 2014
@@ -48,12 +48,25 @@ public class QueryProperties {
   boolean mapJoinRemoved = false;
   boolean hasMapGroupBy = false;
 
+  private int noOfJoins = 0;
+  private int noOfOuterJoins = 0;
+
   public boolean hasJoin() {
-    return hasJoin;
+    return (noOfJoins > 0);
+  }
+
+  public void incrementJoinCount(boolean noOuterJoin) {
+    noOfJoins++;
+    if (!noOuterJoin)
+      noOfOuterJoins++;
+  }
+
+  public int getJoinCount() {
+    return noOfJoins;
   }
 
-  public void setHasJoin(boolean hasJoin) {
-    this.hasJoin = hasJoin;
+  public int getOuterJoinCount() {
+    return noOfOuterJoins;
   }
 
   public boolean hasGroupBy() {
@@ -143,4 +156,25 @@ public class QueryProperties {
   public void setHasMapGroupBy(boolean hasMapGroupBy) {
     this.hasMapGroupBy = hasMapGroupBy;
   }
+
+  public void clear() {
+    hasJoin = false;
+    hasGroupBy = false;
+    hasOrderBy = false;
+    hasSortBy = false;
+    hasJoinFollowedByGroupBy = false;
+    hasPTF = false;
+    hasWindowing = false;
+
+    // does the query have a using clause
+    usesScript = false;
+
+    hasDistributeBy = false;
+    hasClusterBy = false;
+    mapJoinRemoved = false;
+    hasMapGroupBy = false;
+
+    noOfJoins = 0;
+    noOfOuterJoins = 0;
+  }
 }

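A hedged sketch of how the new counters replace the removed setHasJoin flag; the calling context (the semantic analyzer walking join tokens) is assumed rather than shown in this hunk:

    QueryProperties queryProperties = new QueryProperties();
    queryProperties.incrementJoinCount(false);  // an outer join was encountered
    queryProperties.incrementJoinCount(true);   // an inner join was encountered
    // hasJoin() is now derived from the counter instead of the removed setHasJoin():
    assert queryProperties.hasJoin();
    assert queryProperties.getJoinCount() == 2;
    assert queryProperties.getOuterJoinCount() == 1;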
Added: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/CostBasedOptimizer.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/CostBasedOptimizer.java?rev=1605013&view=auto
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/CostBasedOptimizer.java (added)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/CostBasedOptimizer.java Tue Jun 24 06:32:30 2014
@@ -0,0 +1,219 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0  
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License. 
+ */
+package org.apache.hadoop.hive.ql.optimizer;
+
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+
+import com.google.common.collect.ImmutableSet;
+import com.google.common.collect.Lists;
+
+import net.hydromatic.optiq.SchemaPlus;
+import net.hydromatic.optiq.tools.Frameworks;
+
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.metastore.api.FieldSchema;
+import org.apache.hadoop.hive.ql.QueryProperties;
+import org.apache.hadoop.hive.ql.exec.Operator;
+import org.apache.hadoop.hive.ql.optimizer.optiq.HiveDefaultRelMetadataProvider;
+import org.apache.hadoop.hive.ql.optimizer.optiq.cost.HiveVolcanoPlanner;
+import org.apache.hadoop.hive.ql.optimizer.optiq.reloperators.HiveRel;
+import org.apache.hadoop.hive.ql.optimizer.optiq.rules.HiveMergeProjectRule;
+import org.apache.hadoop.hive.ql.optimizer.optiq.rules.HivePullUpProjectsAboveJoinRule;
+import org.apache.hadoop.hive.ql.optimizer.optiq.rules.HivePushJoinThroughJoinRule;
+import org.apache.hadoop.hive.ql.optimizer.optiq.rules.HiveSwapJoinRule;
+import org.apache.hadoop.hive.ql.optimizer.optiq.translator.ASTConverter;
+import org.apache.hadoop.hive.ql.optimizer.optiq.translator.RelNodeConverter;
+import org.apache.hadoop.hive.ql.parse.ASTNode;
+import org.apache.hadoop.hive.ql.parse.ParseContext;
+import org.apache.hadoop.hive.ql.parse.SemanticAnalyzer;
+import org.apache.hadoop.hive.ql.plan.api.OperatorType;
+import org.eigenbase.rel.RelCollationImpl;
+import org.eigenbase.rel.RelNode;
+import org.eigenbase.rel.metadata.CachingRelMetadataProvider;
+import org.eigenbase.rel.metadata.ChainedRelMetadataProvider;
+import org.eigenbase.rel.metadata.RelMetadataProvider;
+import org.eigenbase.relopt.RelOptCluster;
+import org.eigenbase.relopt.RelOptPlanner;
+import org.eigenbase.relopt.RelOptQuery;
+import org.eigenbase.relopt.RelOptSchema;
+import org.eigenbase.relopt.RelTraitSet;
+import org.eigenbase.rex.RexBuilder;
+
+/* 
+ * Entry point to Optimizations using Optiq.  
+ */
+public class CostBasedOptimizer implements Frameworks.PlannerAction<RelNode> {
+  private static final Set<OperatorType> m_unsupportedOpTypes = ImmutableSet.of(OperatorType.DEMUX,
+                                                                  OperatorType.FORWARD,
+                                                                  OperatorType.LATERALVIEWFORWARD,
+                                                                  OperatorType.LATERALVIEWJOIN,
+                                                                  OperatorType.MUX,
+                                                                  OperatorType.PTF,
+                                                                  OperatorType.SCRIPT,
+                                                                  OperatorType.UDTF,
+                                                                  OperatorType.UNION);
+
+  @SuppressWarnings("rawtypes")
+  private final Operator                 m_sinkOp;
+  private final SemanticAnalyzer         m_semanticAnalyzer;
+  private final ParseContext             m_ParseContext;
+
+  public CostBasedOptimizer(@SuppressWarnings("rawtypes") Operator sinkOp,
+      SemanticAnalyzer semanticAnalyzer, ParseContext pCtx) {
+    m_sinkOp = sinkOp;
+    m_semanticAnalyzer = semanticAnalyzer;
+    m_ParseContext = pCtx;
+  }
+
+  /*
+   * The current contract: given a Hive Operator Tree, return an optimized
+   * plan as a Hive AST.
+   */
+  public static ASTNode optimize(@SuppressWarnings("rawtypes") Operator sinkOp,
+      SemanticAnalyzer semanticAnalyzer, ParseContext pCtx, List<FieldSchema> resultSchema) {
+    ASTNode optiqOptimizedAST = null;
+    RelNode optimizedOptiqPlan = Frameworks.withPlanner(new CostBasedOptimizer(sinkOp,
+        semanticAnalyzer, pCtx));
+    optiqOptimizedAST = ASTConverter.convert(optimizedOptiqPlan, resultSchema);
+
+    return optiqOptimizedAST;
+  }
+
+  @Override
+  @SuppressWarnings("unchecked")
+  public RelNode apply(RelOptCluster cluster, RelOptSchema relOptSchema, SchemaPlus schema) {
+    RelOptPlanner planner = HiveVolcanoPlanner.createPlanner();
+
+    /*
+     * recreate cluster, so that it picks up the additional traitDef
+     */
+    final RelOptQuery query = new RelOptQuery(planner);
+    final RexBuilder rexBuilder = cluster.getRexBuilder();
+    cluster = query.createCluster(rexBuilder.getTypeFactory(), rexBuilder);
+    List<RelMetadataProvider> list = Lists.newArrayList();
+    list.add(HiveDefaultRelMetadataProvider.INSTANCE);
+    planner.registerMetadataProviders(list);
+
+    RelMetadataProvider chainedProvider = ChainedRelMetadataProvider.of(list);
+    cluster.setMetadataProvider(new CachingRelMetadataProvider(chainedProvider, planner));
+
+    RelNode opTreeInOptiq = RelNodeConverter.convert(m_sinkOp, cluster, relOptSchema,
+        m_semanticAnalyzer, m_ParseContext);
+
+    planner.clearRules();
+    planner.addRule(HiveSwapJoinRule.INSTANCE);
+    planner.addRule(HivePushJoinThroughJoinRule.LEFT);
+    planner.addRule(HivePushJoinThroughJoinRule.RIGHT);
+    if (HiveConf.getBoolVar(m_ParseContext.getConf(),
+        HiveConf.ConfVars.HIVE_CBO_PULLPROJECTABOVEJOIN_RULE)) {
+      planner.addRule(HivePullUpProjectsAboveJoinRule.BOTH_PROJECT);
+      planner.addRule(HivePullUpProjectsAboveJoinRule.LEFT_PROJECT);
+      planner.addRule(HivePullUpProjectsAboveJoinRule.RIGHT_PROJECT);
+      planner.addRule(HiveMergeProjectRule.INSTANCE);
+    }
+
+    RelTraitSet desiredTraits = cluster.traitSetOf(HiveRel.CONVENTION, RelCollationImpl.EMPTY);
+
+    RelNode rootRel = opTreeInOptiq;
+    if (!rootRel.getTraitSet().equals(desiredTraits)) {
+      rootRel = planner.changeTraits(opTreeInOptiq, desiredTraits);
+    }
+    planner.setRoot(rootRel);
+
+    return planner.findBestExp();
+  }
+
+  public static boolean canHandleOpTree(@SuppressWarnings("rawtypes") Operator sinkOp, HiveConf conf,
+      QueryProperties qp) {
+    boolean runOptiq = false;
+
+    if ((qp.getJoinCount() > 1) && (qp.getJoinCount() < HiveConf.getIntVar(conf,
+        HiveConf.ConfVars.HIVE_CBO_MAX_JOINS_SUPPORTED))
+        && (qp.getOuterJoinCount() == 0)
+        && !qp.hasClusterBy() && !qp.hasDistributeBy() && !qp.hasSortBy() && !qp.hasWindowing()) {
+      @SuppressWarnings("rawtypes")
+      final HashSet<Operator> start = new HashSet<Operator>();
+
+      start.add(sinkOp);
+      // TODO: use queryproperties instead of walking the tree
+      if (!CostBasedOptimizer.operatorExists(start, true, m_unsupportedOpTypes)) {
+        runOptiq = true;
+      }
+    }
+
+    return runOptiq;
+  }
+
+  /*
+   * TODO: moved this out of OperatorUtils for now; HIVE-6403 is going to bring
+   * in iterateParents: https://reviews.apache.org/r/18137/diff/#index_header
+   * Will just use/enhance that once it is in. hb 2/15
+   */
+  /**
+   * Check if operator tree, in the direction specified forward/backward,
+   * contains any operator specified in the targetOPTypes.
+   * 
+   * @param start
+   *          list of operators to start checking from
+   * @param backward
+   *          direction of DAG traversal; if true implies get parent ops for
+   *          traversal otherwise children will be used
+   * @param targetOPTypes
+   *          Set of operator types to look for
+   * 
+   * @return true if any of the operators, or their parents/children, is of a type
+   *         specified in targetOPTypes
+   * 
+   *         NOTE: 1. This employs breadth-first search. 2. By using a HashSet for
+   *         "start" we avoid revisiting the same operator within one level; however,
+   *         it doesn't prevent revisiting the same node more than once for some
+   *         complex DAGs.
+   */
+  @SuppressWarnings("unchecked")
+  public static boolean operatorExists(@SuppressWarnings("rawtypes") final HashSet<Operator> start,
+      final boolean backward, final Set<OperatorType> targetOPTypes) {
+    @SuppressWarnings("rawtypes")
+    HashSet<Operator> nextSetOfOperators = new HashSet<Operator>();
+
+    for (@SuppressWarnings("rawtypes")
+    Operator op : start) {
+      if (targetOPTypes.contains(op.getType())) {
+        return true;
+      }
+
+      if (backward) {
+        if (op.getParentOperators() != null) {
+          nextSetOfOperators.addAll(op.getParentOperators());
+        }
+      } else {
+        if (op.getChildOperators() != null) {
+          nextSetOfOperators.addAll(op.getChildOperators());
+        }
+      }
+    }
+
+    if (!nextSetOfOperators.isEmpty()) {
+      return operatorExists(nextSetOfOperators, backward, targetOPTypes);
+    }
+
+    return false;
+  }
+
+}

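A hedged sketch of how a caller such as SemanticAnalyzer might gate and invoke this optimizer; the wiring on the analyzer side is not shown in this part of the diff, so sinkOp, semAnalyzer, pCtx, conf, qp and resultSchema are assumed to be in scope:

    if (HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_CBO_ENABLED)
        && CostBasedOptimizer.canHandleOpTree(sinkOp, conf, qp)) {
      ASTNode optimizedAST = CostBasedOptimizer.optimize(sinkOp, semAnalyzer, pCtx, resultSchema);
      // the returned AST would then be re-analyzed in place of the original query AST
    }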
Added: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/PreCBOOptimizer.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/PreCBOOptimizer.java?rev=1605013&view=auto
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/PreCBOOptimizer.java (added)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/PreCBOOptimizer.java Tue Jun 24 06:32:30 2014
@@ -0,0 +1,85 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.ql.optimizer;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.ql.optimizer.lineage.Generator;
+import org.apache.hadoop.hive.ql.optimizer.pcr.PartitionConditionRemover;
+import org.apache.hadoop.hive.ql.optimizer.ppr.PartitionPruner;
+import org.apache.hadoop.hive.ql.optimizer.stats.annotation.AnnotateWithStatistics;
+import org.apache.hadoop.hive.ql.parse.ParseContext;
+import org.apache.hadoop.hive.ql.parse.SemanticException;
+import org.apache.hadoop.hive.ql.ppd.PredicatePushDown;
+import org.apache.hadoop.hive.ql.ppd.PredicateTransitivePropagate;
+
+/*
+ * Run Predicate Pushdown, Partition Pruning and Column Pruning before CBO.
+ */
+public class PreCBOOptimizer {
+  private ParseContext    pctx;
+  private List<Transform> transformations;
+
+  /**
+   * Create the list of transformations.
+   * 
+   * @param hiveConf
+   */
+  public void initialize(HiveConf hiveConf) {
+    transformations = new ArrayList<Transform>();
+    // Add the transformation that computes the lineage information.
+    transformations.add(new Generator());
+    transformations.add(new PredicateTransitivePropagate());
+    transformations.add(new PredicatePushDown());
+    transformations.add(new PartitionPruner());
+    transformations.add(new PartitionConditionRemover());
+    transformations.add(new ColumnPruner());
+    transformations.add(new AnnotateWithStatistics());
+  }
+
+  /**
+   * Invoke all the transformations one-by-one, and alter the query plan.
+   * 
+   * @return ParseContext
+   * @throws SemanticException
+   */
+  public ParseContext optimize() throws SemanticException {
+    for (Transform t : transformations) {
+      pctx = t.transform(pctx);
+    }
+    return pctx;
+  }
+
+  /**
+   * @return the pctx
+   */
+  public ParseContext getPctx() {
+    return pctx;
+  }
+
+  /**
+   * @param pctx
+   *          the pctx to set
+   */
+  public void setPctx(ParseContext pctx) {
+    this.pctx = pctx;
+  }
+
+}

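A minimal usage sketch, assuming a ParseContext (parseContext) and a HiveConf (hiveConf) are already available; the actual call site is not shown in this hunk:

    PreCBOOptimizer preCboOptimizer = new PreCBOOptimizer();
    preCboOptimizer.setPctx(parseContext);
    preCboOptimizer.initialize(hiveConf);
    ParseContext transformedCtx = preCboOptimizer.optimize();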
Added: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/HiveDefaultRelMetadataProvider.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/HiveDefaultRelMetadataProvider.java?rev=1605013&view=auto
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/HiveDefaultRelMetadataProvider.java (added)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/HiveDefaultRelMetadataProvider.java Tue Jun 24 06:32:30 2014
@@ -0,0 +1,27 @@
+package org.apache.hadoop.hive.ql.optimizer.optiq;
+
+import com.google.common.collect.ImmutableList;
+
+import org.apache.hadoop.hive.ql.optimizer.optiq.stats.HiveRelMdDistinctRowCount;
+import org.apache.hadoop.hive.ql.optimizer.optiq.stats.HiveRelMdSelectivity;
+import org.eigenbase.rel.metadata.ChainedRelMetadataProvider;
+import org.eigenbase.rel.metadata.DefaultRelMetadataProvider;
+import org.eigenbase.rel.metadata.RelMetadataProvider;
+
+/**
+ * Distinct row count and selectivity are overridden for Hive.<br>
+ * <p>
+ * Distinct Row Count is overridden for:<br>
+ * 1) Join 2) TableScan.<br>
+ * Selectivity is overridden for:<br>
+ * 1) Join 2) TableScan & Filter.
+ */
+public class HiveDefaultRelMetadataProvider {
+  private HiveDefaultRelMetadataProvider() {
+  }
+
+  public static final RelMetadataProvider INSTANCE = ChainedRelMetadataProvider.of(ImmutableList
+                                                       .of(HiveRelMdDistinctRowCount.SOURCE,
+                                                           HiveRelMdSelectivity.SOURCE,
+                                                           new DefaultRelMetadataProvider()));
+}

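For reference, registering this provider follows the pattern already used in CostBasedOptimizer.apply above; condensed here, with planner and cluster assumed to be a RelOptPlanner and a RelOptCluster:

    List<RelMetadataProvider> providers = Lists.newArrayList();
    providers.add(HiveDefaultRelMetadataProvider.INSTANCE);
    planner.registerMetadataProviders(providers);
    RelMetadataProvider chained = ChainedRelMetadataProvider.of(providers);
    cluster.setMetadataProvider(new CachingRelMetadataProvider(chained, planner));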
Added: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/HiveOptiqUtil.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/HiveOptiqUtil.java?rev=1605013&view=auto
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/HiveOptiqUtil.java (added)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/HiveOptiqUtil.java Tue Jun 24 06:32:30 2014
@@ -0,0 +1,51 @@
+package org.apache.hadoop.hive.ql.optimizer.optiq;
+
+import java.util.ArrayList;
+import java.util.BitSet;
+import java.util.List;
+
+import org.eigenbase.rex.RexInputRef;
+import org.eigenbase.rex.RexNode;
+
+/**
+ * Generic utility functions needed for Optiq based Hive CBO.
+ */
+
+public class HiveOptiqUtil {
+
+  /**
+   * Get list of virtual columns from the given list of projections.
+   * <p>
+   * 
+   * @param exps
+   *          list of rex nodes representing projections
+   * @return List of Virtual Columns, will not be null.
+   */
+  public static List<Integer> getVirtualCols(List<RexNode> exps) {
+    List<Integer> vCols = new ArrayList<Integer>();
+
+    for (int i = 0; i < exps.size(); i++) {
+      if (!(exps.get(i) instanceof RexInputRef)) {
+        vCols.add(i);
+      }
+    }
+
+    return vCols;
+  }
+
+  public static List<Integer> translateBitSetToProjIndx(BitSet projBitSet) {
+    List<Integer> projIndxLst = new ArrayList<Integer>();
+
+    for (int i = 0; i < projBitSet.length(); i++) {
+      if (projBitSet.get(i)) {
+        projIndxLst.add(i);
+      }
+    }
+
+    return projIndxLst;
+  }
+
+  @Deprecated
+  public static void todo(String s) {
+  }
+}

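A small self-contained example of translateBitSetToProjIndx; the BitSet is assumed to mark which columns are projected:

    java.util.BitSet projected = new java.util.BitSet();
    projected.set(0);
    projected.set(2);
    projected.set(5);
    List<Integer> projIndexes = HiveOptiqUtil.translateBitSetToProjIndx(projected);
    // projIndexes is [0, 2, 5]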
Added: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/JoinUtil.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/JoinUtil.java?rev=1605013&view=auto
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/JoinUtil.java (added)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/JoinUtil.java Tue Jun 24 06:32:30 2014
@@ -0,0 +1,295 @@
+package org.apache.hadoop.hive.ql.optimizer.optiq;
+
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Set;
+
+import org.apache.hadoop.hive.ql.optimizer.optiq.reloperators.HiveJoinRel;
+import org.eigenbase.relopt.RelOptUtil;
+import org.eigenbase.relopt.RelOptUtil.InputReferencedVisitor;
+import org.eigenbase.rex.RexNode;
+import org.eigenbase.sql.SqlKind;
+
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.ImmutableSet;
+
+/**
+ * Utility for inspecting Join Conditions.<br>
+ * <p>
+ * Main Elements:<br>
+ * 1. JoinPredicateInfo - represents Join Condition.<br>
+ * 2. JoinLeafPredicateInfo - represents leaf predicates within the join condition.
+ * 
+ * TODO: Move this to Optiq Framework
+ */
+public class JoinUtil {
+
+  /**
+   * JoinPredicateInfo represents the Join condition; JoinPredicateInfo uses
+   * JoinLeafPredicateInfo to represent individual conjunctive elements in the
+   * predicate.<br>
+   * JoinPredicateInfo = JoinLeafPredicateInfo1 and JoinLeafPredicateInfo2...<br>
+   * <p>
+   * JoinPredicateInfo:<br>
+   * 1. Preserves the order of conjunctive elements for
+   * equi-joins (m_equiJoinPredicateElements)<br>
+   * 2. Stores the set of projection indexes from the left and right children that are part
+   * of equi-join keys; the indexes are in both the child and Join node schemas.<br>
+   * 3. Keeps a map from projection indexes that are part of join keys to the list of
+   * conjunctive elements (JoinLeafPredicateInfo) that use them.
+   * 
+   */
+  public static class JoinPredicateInfo {
+    private final ImmutableList<JoinLeafPredicateInfo>                        m_nonEquiJoinPredicateElements;
+    private final ImmutableList<JoinLeafPredicateInfo>                        m_equiJoinPredicateElements;
+    private final ImmutableSet<Integer>                                       m_projsFromLeftPartOfJoinKeysInChildSchema;
+    private final ImmutableSet<Integer>                                       m_projsFromRightPartOfJoinKeysInChildSchema;
+    private final ImmutableSet<Integer>                                       m_projsFromRightPartOfJoinKeysInJoinSchema;
+    private final ImmutableMap<Integer, ImmutableList<JoinLeafPredicateInfo>> m_mapOfProjIndxInJoinSchemaToLeafPInfo;
+
+    public JoinPredicateInfo(List<JoinLeafPredicateInfo> nonEquiJoinPredicateElements,
+        List<JoinLeafPredicateInfo> equiJoinPredicateElements,
+        Set<Integer> projsFromLeftPartOfJoinKeysInChildSchema,
+        Set<Integer> projsFromRightPartOfJoinKeysInChildSchema,
+        Set<Integer> projsFromRightPartOfJoinKeysInJoinSchema,
+        Map<Integer, ImmutableList<JoinLeafPredicateInfo>> mapOfProjIndxInJoinSchemaToLeafPInfo) {
+      m_nonEquiJoinPredicateElements = ImmutableList.copyOf(nonEquiJoinPredicateElements);
+      m_equiJoinPredicateElements = ImmutableList.copyOf(equiJoinPredicateElements);
+      m_projsFromLeftPartOfJoinKeysInChildSchema = ImmutableSet
+          .copyOf(projsFromLeftPartOfJoinKeysInChildSchema);
+      m_projsFromRightPartOfJoinKeysInChildSchema = ImmutableSet
+          .copyOf(projsFromRightPartOfJoinKeysInChildSchema);
+      m_projsFromRightPartOfJoinKeysInJoinSchema = ImmutableSet
+          .copyOf(projsFromRightPartOfJoinKeysInJoinSchema);
+      m_mapOfProjIndxInJoinSchemaToLeafPInfo = ImmutableMap
+          .copyOf(mapOfProjIndxInJoinSchemaToLeafPInfo);
+    }
+
+    public List<JoinLeafPredicateInfo> getNonEquiJoinPredicateElements() {
+      return m_nonEquiJoinPredicateElements;
+    }
+
+    public List<JoinLeafPredicateInfo> getEquiJoinPredicateElements() {
+      return m_equiJoinPredicateElements;
+    }
+
+    public Set<Integer> getProjsFromLeftPartOfJoinKeysInChildSchema() {
+      return m_projsFromLeftPartOfJoinKeysInChildSchema;
+    }
+
+    public Set<Integer> getProjsFromRightPartOfJoinKeysInChildSchema() {
+      return m_projsFromRightPartOfJoinKeysInChildSchema;
+    }
+
+    /**
+     * NOTE: Join Schema = left Schema + (right Schema offset by
+     * left.fieldcount). Hence it is OK to return projections from left in child
+     * schema.
+     */
+    public Set<Integer> getProjsFromLeftPartOfJoinKeysInJoinSchema() {
+      return m_projsFromLeftPartOfJoinKeysInChildSchema;
+    }
+
+    public Set<Integer> getProjsFromRightPartOfJoinKeysInJoinSchema() {
+      return m_projsFromRightPartOfJoinKeysInJoinSchema;
+    }
+
+    public Map<Integer, ImmutableList<JoinLeafPredicateInfo>> getMapOfProjIndxToLeafPInfo() {
+      return m_mapOfProjIndxInJoinSchemaToLeafPInfo;
+    }
+
+    public static JoinPredicateInfo constructJoinPredicateInfo(HiveJoinRel j) {
+      return constructJoinPredicateInfo(j, j.getCondition());
+    }
+
+    public static JoinPredicateInfo constructJoinPredicateInfo(HiveJoinRel j, RexNode predicate) {
+      JoinPredicateInfo jpi = null;
+      JoinLeafPredicateInfo jlpi = null;
+      List<JoinLeafPredicateInfo> equiLPIList = new ArrayList<JoinLeafPredicateInfo>();
+      List<JoinLeafPredicateInfo> nonEquiLPIList = new ArrayList<JoinLeafPredicateInfo>();
+      Set<Integer> projsFromLeftPartOfJoinKeys = new HashSet<Integer>();
+      Set<Integer> projsFromRightPartOfJoinKeys = new HashSet<Integer>();
+      Set<Integer> projsFromRightPartOfJoinKeysInJoinSchema = new HashSet<Integer>();
+      Map<Integer, List<JoinLeafPredicateInfo>> tmpMapOfProjIndxInJoinSchemaToLeafPInfo = new HashMap<Integer, List<JoinLeafPredicateInfo>>();
+      Map<Integer, ImmutableList<JoinLeafPredicateInfo>> mapOfProjIndxInJoinSchemaToLeafPInfo = new HashMap<Integer, ImmutableList<JoinLeafPredicateInfo>>();
+      List<JoinLeafPredicateInfo> tmpJLPILst = null;
+      int rightOffSet = j.getLeft().getRowType().getFieldCount();
+      int projIndxInJoin;
+      List<RexNode> conjuctiveElements;
+
+      todo("Move this to Optiq");
+
+      // 1. Decompose Join condition to a number of leaf predicates
+      // (conjunctive elements)
+      conjuctiveElements = RelOptUtil.conjunctions(predicate);
+
+      // 2. Walk through leaf predicates building up JoinLeafPredicateInfo
+      for (RexNode ce : conjuctiveElements) {
+        // 2.1 Construct JoinLeafPredicateInfo
+        jlpi = JoinLeafPredicateInfo.constructJoinLeafPredicateInfo(j, ce);
+
+        // 2.2 Classify leaf predicate as Equi vs Non Equi
+        if (jlpi.m_comparisonType.equals(SqlKind.EQUALS)) {
+          equiLPIList.add(jlpi);
+        } else {
+          nonEquiLPIList.add(jlpi);
+        }
+
+        // 2.3 Maintain join keys coming from left vs right (in child &
+        // Join Schema)
+        projsFromLeftPartOfJoinKeys.addAll(jlpi.getProjsFromLeftPartOfJoinKeysInChildSchema());
+        projsFromRightPartOfJoinKeys.addAll(jlpi.getProjsFromRightPartOfJoinKeysInChildSchema());
+        projsFromRightPartOfJoinKeysInJoinSchema.addAll(jlpi
+            .getProjsFromRightPartOfJoinKeysInJoinSchema());
+
+        // 2.4 Update Join Key to JoinLeafPredicateInfo map with keys
+        // from left
+        for (Integer projIndx : jlpi.getProjsFromLeftPartOfJoinKeysInChildSchema()) {
+          tmpJLPILst = tmpMapOfProjIndxInJoinSchemaToLeafPInfo.get(projIndx);
+          if (tmpJLPILst == null)
+            tmpJLPILst = new ArrayList<JoinLeafPredicateInfo>();
+          tmpJLPILst.add(jlpi);
+          tmpMapOfProjIndxInJoinSchemaToLeafPInfo.put(projIndx, tmpJLPILst);
+        }
+
+        // 2.5 Update Join Key to JoinLeafPredicateInfo map with keys
+        // from right
+        for (Integer projIndx : jlpi.getProjsFromRightPartOfJoinKeysInChildSchema()) {
+          projIndxInJoin = projIndx + rightOffSet;
+          tmpJLPILst = tmpMapOfProjIndxInJoinSchemaToLeafPInfo.get(projIndxInJoin);
+          if (tmpJLPILst == null)
+            tmpJLPILst = new ArrayList<JoinLeafPredicateInfo>();
+          tmpJLPILst.add(jlpi);
+          tmpMapOfProjIndxInJoinSchemaToLeafPInfo.put(projIndxInJoin, tmpJLPILst);
+        }
+
+      }
+
+      // 3. Update the Join Key to List<JoinLeafPredicateInfo> map to use
+      // ImmutableList
+      for (Entry<Integer, List<JoinLeafPredicateInfo>> e : tmpMapOfProjIndxInJoinSchemaToLeafPInfo
+          .entrySet()) {
+        mapOfProjIndxInJoinSchemaToLeafPInfo.put(e.getKey(), ImmutableList.copyOf(e.getValue()));
+      }
+
+      // 4. Construct JoinPredicateInfo
+      jpi = new JoinPredicateInfo(nonEquiLPIList, equiLPIList, projsFromLeftPartOfJoinKeys,
+          projsFromRightPartOfJoinKeys, projsFromRightPartOfJoinKeysInJoinSchema,
+          mapOfProjIndxInJoinSchemaToLeafPInfo);
+      return jpi;
+    }
+  }
+
+  /**
+   * JoinLeafPredicateInfo represents a leaf predicate in the Join condition
+   * (conjunctive element).<br>
+   * <p>
+   * JoinLeafPredicateInfo:<br>
+   * 1. Stores the list of expressions from the left and right children that are part of
+   * equi-join keys.<br>
+   * 2. Stores the set of projection indexes from the left and right children that are part
+   * of equi-join keys; the indexes are in both the child and Join node schemas.<br>
+   */
+  public static class JoinLeafPredicateInfo {
+    private final SqlKind                m_comparisonType;
+    private final ImmutableList<RexNode> m_joinKeyExprsFromLeft;
+    private final ImmutableList<RexNode> m_joinKeyExprsFromRight;
+    private final ImmutableSet<Integer>  m_projsFromLeftPartOfJoinKeysInChildSchema;
+    private final ImmutableSet<Integer>  m_projsFromRightPartOfJoinKeysInChildSchema;
+    private final ImmutableSet<Integer>  m_projsFromRightPartOfJoinKeysInJoinSchema;
+
+    public JoinLeafPredicateInfo(SqlKind comparisonType, List<RexNode> joinKeyExprsFromLeft,
+        List<RexNode> joinKeyExprsFromRight, Set<Integer> projsFromLeftPartOfJoinKeysInChildSchema,
+        Set<Integer> projsFromRightPartOfJoinKeysInChildSchema,
+        Set<Integer> projsFromRightPartOfJoinKeysInJoinSchema) {
+      m_comparisonType = comparisonType;
+      m_joinKeyExprsFromLeft = ImmutableList.copyOf(joinKeyExprsFromLeft);
+      m_joinKeyExprsFromRight = ImmutableList.copyOf(joinKeyExprsFromRight);
+      m_projsFromLeftPartOfJoinKeysInChildSchema = ImmutableSet
+          .copyOf(projsFromLeftPartOfJoinKeysInChildSchema);
+      m_projsFromRightPartOfJoinKeysInChildSchema = ImmutableSet
+          .copyOf(projsFromRightPartOfJoinKeysInChildSchema);
+      m_projsFromRightPartOfJoinKeysInJoinSchema = ImmutableSet
+          .copyOf(projsFromRightPartOfJoinKeysInJoinSchema);
+    }
+
+    public List<RexNode> getJoinKeyExprsFromLeft() {
+      return m_joinKeyExprsFromLeft;
+    }
+
+    public List<RexNode> getJoinKeyExprsFromRight() {
+      return m_joinKeyExprsFromRight;
+    }
+
+    public Set<Integer> getProjsFromLeftPartOfJoinKeysInChildSchema() {
+      return m_projsFromLeftPartOfJoinKeysInChildSchema;
+    }
+
+    /**
+     * NOTE: Join Schema = left Schema + (right Schema offset by
+     * left.fieldcount). Hence it is OK to return projections from left in child
+     * schema.
+     */
+    public Set<Integer> getProjsFromLeftPartOfJoinKeysInJoinSchema() {
+      return m_projsFromLeftPartOfJoinKeysInChildSchema;
+    }
+
+    public Set<Integer> getProjsFromRightPartOfJoinKeysInChildSchema() {
+      return m_projsFromRightPartOfJoinKeysInChildSchema;
+    }
+
+    public Set<Integer> getProjsFromRightPartOfJoinKeysInJoinSchema() {
+      return m_projsFromRightPartOfJoinKeysInJoinSchema;
+    }
+
+    public static JoinLeafPredicateInfo constructJoinLeafPredicateInfo(HiveJoinRel j, RexNode pe) {
+      JoinLeafPredicateInfo jlpi = null;
+      List<Integer> filterNulls = new ArrayList<Integer>();
+      List<RexNode> joinKeyExprsFromLeft = new ArrayList<RexNode>();
+      List<RexNode> joinKeyExprsFromRight = new ArrayList<RexNode>();
+      Set<Integer> projsFromLeftPartOfJoinKeysInChildSchema = new HashSet<Integer>();
+      Set<Integer> projsFromRightPartOfJoinKeysInChildSchema = new HashSet<Integer>();
+      Set<Integer> projsFromRightPartOfJoinKeysInJoinSchema = new HashSet<Integer>();
+      int rightOffSet = j.getLeft().getRowType().getFieldCount();
+
+      todo("Move this to Optiq");
+
+      // 1. Split leaf join predicate to expressions from left, right
+      @SuppressWarnings("unused")
+      RexNode nonEquiPredicate = RelOptUtil.splitJoinCondition(j.getSystemFieldList(), j.getLeft(),
+          j.getRight(), pe, joinKeyExprsFromLeft, joinKeyExprsFromRight, filterNulls, null);
+
+      // 2. For left expressions, collect child projection indexes used
+      InputReferencedVisitor irvLeft = new InputReferencedVisitor();
+      irvLeft.apply(joinKeyExprsFromLeft);
+      projsFromLeftPartOfJoinKeysInChildSchema.addAll(irvLeft.inputPosReferenced);
+
+      // 3. For right expressions, collect child projection indexes used
+      InputReferencedVisitor irvRight = new InputReferencedVisitor();
+      irvRight.apply(joinKeyExprsFromRight);
+      projsFromRightPartOfJoinKeysInChildSchema.addAll(irvRight.inputPosReferenced);
+
+      // 4. Translate projection indexes from right to join schema, by adding
+      // offset.
+      for (Integer indx : projsFromRightPartOfJoinKeysInChildSchema) {
+        projsFromRightPartOfJoinKeysInJoinSchema.add(indx + rightOffSet);
+      }
+
+      // 5. Construct JoinLeafPredicateInfo
+      jlpi = new JoinLeafPredicateInfo(pe.getKind(), joinKeyExprsFromLeft, joinKeyExprsFromRight,
+          projsFromLeftPartOfJoinKeysInChildSchema, projsFromRightPartOfJoinKeysInChildSchema,
+          projsFromRightPartOfJoinKeysInJoinSchema);
+
+      return jlpi;
+    }
+  }
+
+  @Deprecated
+  public static void todo(String s) {
+  }
+}

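A hedged sketch of how a planner rule or stats routine might consume this utility, assuming a HiveJoinRel (joinRel) is already in hand; constructing one is outside the scope of this snippet:

    JoinUtil.JoinPredicateInfo jpi =
        JoinUtil.JoinPredicateInfo.constructJoinPredicateInfo(joinRel);
    for (JoinUtil.JoinLeafPredicateInfo leaf : jpi.getEquiJoinPredicateElements()) {
      List<RexNode> leftKeys = leaf.getJoinKeyExprsFromLeft();
      List<RexNode> rightKeys = leaf.getJoinKeyExprsFromRight();
      // e.g. feed leftKeys/rightKeys into NDV-based selectivity estimation
    }
    Set<Integer> leftKeyCols = jpi.getProjsFromLeftPartOfJoinKeysInChildSchema();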
Added: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/Pair.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/Pair.java?rev=1605013&view=auto
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/Pair.java (added)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/Pair.java Tue Jun 24 06:32:30 2014
@@ -0,0 +1,19 @@
+package org.apache.hadoop.hive.ql.optimizer.optiq;
+
+public class Pair<T1, T2> {
+  private final T1 m_first;
+  private final T2 m_second;
+
+  public Pair(T1 first, T2 second) {
+    m_first = first;
+    m_second = second;
+  }
+
+  public T1 getFirst() {
+    return m_first;
+  }
+
+  public T2 getSecond() {
+    return m_second;
+  }
+}

Added: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/RelOptHiveTable.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/RelOptHiveTable.java?rev=1605013&view=auto
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/RelOptHiveTable.java (added)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/RelOptHiveTable.java Tue Jun 24 06:32:30 2014
@@ -0,0 +1,67 @@
+package org.apache.hadoop.hive.ql.optimizer.optiq;
+
+import java.util.BitSet;
+import java.util.HashMap;
+import java.util.Map;
+
+import org.apache.hadoop.hive.ql.metadata.Table;
+import org.apache.hadoop.hive.ql.plan.Statistics;
+import org.eigenbase.rel.RelNode;
+import org.eigenbase.rel.TableAccessRel;
+import org.eigenbase.relopt.RelOptAbstractTable;
+import org.eigenbase.relopt.RelOptSchema;
+import org.eigenbase.reltype.RelDataType;
+
+// FIXME: use table metadata and stats utilities to get stats
+public class RelOptHiveTable extends RelOptAbstractTable {
+  private final Table       m_hiveTblMetadata;
+  private double            m_rowCount           = -1;
+
+  final Map<String, Double> m_columnIdxToSizeMap = new HashMap<String, Double>();
+
+  Map<String, Integer>      m_bucketingColMap;
+  Map<String, Integer>      m_bucketingSortColMap;
+
+  Statistics                m_hiveStats;
+
+  // NOTE: "name" here is the table alias, which may or may not be the real name
+  // in metadata. Use m_hiveTblMetadata.getTableName() for the table name and
+  // m_hiveTblMetadata.getDbName() for the db name.
+  public RelOptHiveTable(RelOptSchema schema, String name, RelDataType rowType,
+      Table hiveTblMetadata, Statistics stats) {
+    super(schema, name, rowType);
+    m_hiveTblMetadata = hiveTblMetadata;
+    m_hiveStats = stats;
+
+    m_rowCount = stats.getNumRows();
+  }
+
+  @Override
+  public boolean isKey(BitSet arg0) {
+    return false;
+  }
+
+  @Override
+  public RelNode toRel(ToRelContext context) {
+    return new TableAccessRel(context.getCluster(), this);
+  }
+
+  @Override
+  public <T> T unwrap(Class<T> arg0) {
+    return arg0.isInstance(this) ? arg0.cast(this) : null;
+  }
+
+  @Override
+  public double getRowCount() {
+    return m_rowCount;
+  }
+
+  public Table getHiveTableMD() {
+    return m_hiveTblMetadata;
+  }
+
+  public Statistics getHiveStats() {
+    return m_hiveStats;
+  }
+}

Added: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/TraitsUtil.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/TraitsUtil.java?rev=1605013&view=auto
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/TraitsUtil.java (added)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/TraitsUtil.java Tue Jun 24 06:32:30 2014
@@ -0,0 +1,50 @@
+package org.apache.hadoop.hive.ql.optimizer.optiq;
+
+import java.util.List;
+
+import org.apache.hadoop.hive.ql.optimizer.optiq.reloperators.HiveRel;
+import org.eigenbase.rel.AggregateCall;
+import org.eigenbase.rel.RelCollation;
+import org.eigenbase.rel.RelCollationImpl;
+import org.eigenbase.rel.RelNode;
+import org.eigenbase.relopt.RelOptCluster;
+import org.eigenbase.relopt.RelTraitSet;
+import org.eigenbase.reltype.RelDataType;
+import org.eigenbase.rex.RexNode;
+
+public class TraitsUtil {
+
+  public static RelTraitSet getSelectTraitSet(RelOptCluster cluster, List<RexNode> exps,
+      RelNode child) {
+    return cluster.traitSetOf(HiveRel.CONVENTION, RelCollationImpl.EMPTY);
+  }
+
+  public static RelTraitSet getSortTraitSet(RelOptCluster cluster, RelTraitSet traitSet,
+      RelCollation collation) {
+    return traitSet.plus(collation);
+  }
+
+  public static RelTraitSet getFilterTraitSet(RelOptCluster cluster, RelTraitSet traitSet,
+      RelNode child) {
+    return cluster.traitSetOf(HiveRel.CONVENTION, RelCollationImpl.EMPTY);
+  }
+
+  public static RelTraitSet getLimitTraitSet(RelOptCluster cluster, RelTraitSet traitSet,
+      RelNode child) {
+    return cluster.traitSetOf(HiveRel.CONVENTION, RelCollationImpl.EMPTY);
+  }
+
+  public static RelTraitSet getAggregateTraitSet(RelOptCluster cluster, RelTraitSet traitSet,
+      List<Integer> gbCols, List<AggregateCall> aggCalls, RelNode child) {
+    return cluster.traitSetOf(HiveRel.CONVENTION, RelCollationImpl.EMPTY);
+  }
+
+  public static RelTraitSet getTableScanTraitSet(RelOptCluster cluster, RelTraitSet traitSet,
+      RelOptHiveTable table, RelDataType rowtype) {
+    return cluster.traitSetOf(HiveRel.CONVENTION, RelCollationImpl.EMPTY);
+  }
+
+  public static RelTraitSet getJoinTraitSet(RelOptCluster cluster, RelTraitSet traitSet) {
+    return cluster.traitSetOf(HiveRel.CONVENTION, RelCollationImpl.EMPTY);
+  }
+}

Added: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/cost/HiveCost.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/cost/HiveCost.java?rev=1605013&view=auto
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/cost/HiveCost.java (added)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/cost/HiveCost.java Tue Jun 24 06:32:30 2014
@@ -0,0 +1,194 @@
+package org.apache.hadoop.hive.ql.optimizer.optiq.cost;
+
+import org.eigenbase.relopt.RelOptCost;
+import org.eigenbase.relopt.RelOptCostFactory;
+import org.eigenbase.relopt.RelOptUtil;
+
+// TODO: This should inherit from VolcanoCost and should just override isLE method.
+public class HiveCost implements RelOptCost {
+  // ~ Static fields/initializers ---------------------------------------------
+
+  static final HiveCost                 INFINITY = new HiveCost(Double.POSITIVE_INFINITY,
+                                                     Double.POSITIVE_INFINITY,
+                                                     Double.POSITIVE_INFINITY) {
+                                                   @Override
+                                                   public String toString() {
+                                                     return "{inf}";
+                                                   }
+                                                 };
+
+  static final HiveCost                 HUGE     = new HiveCost(Double.MAX_VALUE, Double.MAX_VALUE,
+                                                     Double.MAX_VALUE) {
+                                                   @Override
+                                                   public String toString() {
+                                                     return "{huge}";
+                                                   }
+                                                 };
+
+  static final HiveCost                 ZERO     = new HiveCost(0.0, 0.0, 0.0) {
+                                                   @Override
+                                                   public String toString() {
+                                                     return "{0}";
+                                                   }
+                                                 };
+
+  static final HiveCost                 TINY     = new HiveCost(1.0, 1.0, 0.0) {
+                                                   @Override
+                                                   public String toString() {
+                                                     return "{tiny}";
+                                                   }
+                                                 };
+
+  public static final RelOptCostFactory FACTORY  = new Factory();
+
+  // ~ Instance fields --------------------------------------------------------
+
+  final double                          cpu;
+  final double                          io;
+  final double                          rowCount;
+
+  // ~ Constructors -----------------------------------------------------------
+
+  HiveCost(double rowCount, double cpu, double io) {
+    assert rowCount >= 0d;
+    assert cpu >= 0d;
+    assert io >= 0d;
+    this.rowCount = rowCount;
+    this.cpu = cpu;
+    this.io = io;
+  }
+
+  // ~ Methods ----------------------------------------------------------------
+
+  public double getCpu() {
+    return cpu;
+  }
+
+  public boolean isInfinite() {
+    return (this == INFINITY) || (this.rowCount == Double.POSITIVE_INFINITY)
+        || (this.cpu == Double.POSITIVE_INFINITY) || (this.io == Double.POSITIVE_INFINITY);
+  }
+
+  public double getIo() {
+    return io;
+  }
+
+  // TODO: If two costs are equal, could we do any better than comparing
+  // cardinality (maybe some other heuristic to break the tie)?
+  public boolean isLe(RelOptCost other) {
+    return this == other || this.rowCount <= other.getRows();
+    /*
+     * if (((this.dCpu + this.dIo) < (other.getCpu() + other.getIo())) ||
+     * ((this.dCpu + this.dIo) == (other.getCpu() + other.getIo()) && this.dRows
+     * <= other.getRows())) { return true; } else { return false; }
+     */
+  }
+
+  public boolean isLt(RelOptCost other) {
+    return this.rowCount < other.getRows();
+    /*
+     * return isLe(other) && !equals(other);
+     */
+  }
+
+  public double getRows() {
+    return rowCount;
+  }
+
+  public boolean equals(RelOptCost other) {
+    return (this == other) || ((this.rowCount) == (other.getRows()));
+
+    /*
+     * //TODO: should we consider cardinality as well? return (this == other) ||
+     * ((this.dCpu + this.dIo) == (other.getCpu() + other.getIo()));
+     */
+  }
+
+  public boolean isEqWithEpsilon(RelOptCost other) {
+    return (this == other) || (Math.abs((this.rowCount) - (other.getRows())) < RelOptUtil.EPSILON);
+    /*
+     * return (this == other) || (Math.abs((this.dCpu + this.dIo) -
+     * (other.getCpu() + other.getIo())) < RelOptUtil.EPSILON);
+     */
+  }
+
+  public RelOptCost minus(RelOptCost other) {
+    if (this == INFINITY) {
+      return this;
+    }
+
+    return new HiveCost(this.rowCount - other.getRows(), this.cpu - other.getCpu(), this.io
+        - other.getIo());
+  }
+
+  public RelOptCost multiplyBy(double factor) {
+    if (this == INFINITY) {
+      return this;
+    }
+    return new HiveCost(rowCount * factor, cpu * factor, io * factor);
+  }
+
+  public double divideBy(RelOptCost cost) {
+    // Compute the geometric average of the ratios of all of the factors
+    // which are non-zero and finite.
+    double d = 1;
+    double n = 0;
+    if ((this.rowCount != 0) && !Double.isInfinite(this.rowCount) && (cost.getRows() != 0)
+        && !Double.isInfinite(cost.getRows())) {
+      d *= this.rowCount / cost.getRows();
+      ++n;
+    }
+    if ((this.cpu != 0) && !Double.isInfinite(this.cpu) && (cost.getCpu() != 0)
+        && !Double.isInfinite(cost.getCpu())) {
+      d *= this.cpu / cost.getCpu();
+      ++n;
+    }
+    if ((this.io != 0) && !Double.isInfinite(this.io) && (cost.getIo() != 0)
+        && !Double.isInfinite(cost.getIo())) {
+      d *= this.io / cost.getIo();
+      ++n;
+    }
+    if (n == 0) {
+      return 1.0;
+    }
+    return Math.pow(d, 1 / n);
+  }
+
+  public RelOptCost plus(RelOptCost other) {
+    if ((this == INFINITY) || (other.isInfinite())) {
+      return INFINITY;
+    }
+    return new HiveCost(this.rowCount + other.getRows(), this.cpu + other.getCpu(), this.io
+        + other.getIo());
+  }
+
+  @Override
+  public String toString() {
+    return "{" + rowCount + " rows, " + cpu + " cpu, " + io + " io}";
+  }
+
+  private static class Factory implements RelOptCostFactory {
+    private Factory() {
+    }
+
+    public RelOptCost makeCost(double rowCount, double cpu, double io) {
+      return new HiveCost(rowCount, cpu, io);
+    }
+
+    public RelOptCost makeHugeCost() {
+      return HUGE;
+    }
+
+    public HiveCost makeInfiniteCost() {
+      return INFINITY;
+    }
+
+    public HiveCost makeTinyCost() {
+      return TINY;
+    }
+
+    public HiveCost makeZeroCost() {
+      return ZERO;
+    }
+  }
+}

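A short example of the comparison semantics this cost model currently implements: with isLe/isLt as written, ordering is decided by row count alone, regardless of CPU and IO:

    RelOptCost small = HiveCost.FACTORY.makeCost(100.0, 5000.0, 9000.0);
    RelOptCost big = HiveCost.FACTORY.makeCost(200.0, 1.0, 1.0);
    // true: 100 <= 200, even though small's cpu and io components are larger
    boolean cheaper = small.isLe(big);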
Added: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/cost/HiveCostUtil.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/cost/HiveCostUtil.java?rev=1605013&view=auto
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/cost/HiveCostUtil.java (added)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/cost/HiveCostUtil.java Tue Jun 24 06:32:30 2014
@@ -0,0 +1,23 @@
+package org.apache.hadoop.hive.ql.optimizer.optiq.cost;
+
+import org.apache.hadoop.hive.ql.optimizer.optiq.reloperators.HiveRel;
+import org.apache.hadoop.hive.ql.optimizer.optiq.reloperators.HiveTableScanRel;
+import org.eigenbase.relopt.RelOptCost;
+
+public class HiveCostUtil {
+  private static final double cpuCostInNanoSec          = 1.0;
+  private static final double netCostInNanoSec          = 150 * cpuCostInNanoSec;
+  private static final double localFSWriteCostInNanoSec = 4 * netCostInNanoSec;
+  private static final double localFSReadCostInNanoSec  = 4 * netCostInNanoSec;
+  private static final double hDFSWriteCostInNanoSec    = 10 * localFSWriteCostInNanoSec;
+  private static final double hDFSReadCostInNanoSec     = 1.5 * localFSReadCostInNanoSec;
+
+  public static RelOptCost computCardinalityBasedCost(HiveRel hr) {
+    return new HiveCost(hr.getRows(), 0, 0);
+  }
+
+  public static HiveCost computeCost(HiveTableScanRel t) {
+    double cardinality = t.getRows();
+    return new HiveCost(cardinality, 0, hDFSWriteCostInNanoSec * cardinality * 0);
+  }
+}

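For reference, the declared ratios above resolve to the following relative unit costs (simple arithmetic on the constants):

    cpu            = 1 ns
    network        = 150 * cpu            = 150 ns
    local FS R/W   = 4 * network          = 600 ns
    HDFS write     = 10 * local FS write  = 6000 ns
    HDFS read      = 1.5 * local FS read  = 900 ns

Note that computeCost multiplies the io term by 0, so the table-scan cost it returns is currently cardinality-only.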
Added: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/cost/HiveVolcanoPlanner.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/cost/HiveVolcanoPlanner.java?rev=1605013&view=auto
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/cost/HiveVolcanoPlanner.java (added)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/cost/HiveVolcanoPlanner.java Tue Jun 24 06:32:30 2014
@@ -0,0 +1,32 @@
+package org.apache.hadoop.hive.ql.optimizer.optiq.cost;
+
+import org.eigenbase.rel.RelCollationTraitDef;
+import org.eigenbase.relopt.ConventionTraitDef;
+import org.eigenbase.relopt.RelOptPlanner;
+import org.eigenbase.relopt.volcano.VolcanoPlanner;
+
+/**
+ * Refinement of {@link org.eigenbase.relopt.volcano.VolcanoPlanner} for Hive.
+ * 
+ * <p>
+ * It uses {@link org.apache.hadoop.hive.ql.optimizer.optiq.cost.HiveCost} as
+ * its cost model.
+ */
+public class HiveVolcanoPlanner extends VolcanoPlanner {
+  private static final boolean ENABLE_COLLATION_TRAIT = true;
+
+  /** Creates a HiveVolcanoPlanner. */
+  public HiveVolcanoPlanner() {
+    super(HiveCost.FACTORY);
+  }
+
+  public static RelOptPlanner createPlanner() {
+    final VolcanoPlanner planner = new HiveVolcanoPlanner();
+    planner.addRelTraitDef(ConventionTraitDef.INSTANCE);
+    if (ENABLE_COLLATION_TRAIT) {
+      planner.addRelTraitDef(RelCollationTraitDef.INSTANCE);
+      planner.registerAbstractRelationalRules();
+    }
+    return planner;
+  }
+}

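A minimal usage sketch (illustrative only, assuming rootRel is an operator tree already converted to the HIVE convention) of driving this planner with the rules added elsewhere in this patch:

    RelOptPlanner planner = HiveVolcanoPlanner.createPlanner();
    planner.addRule(HiveMergeProjectRule.INSTANCE);
    planner.addRule(HivePullUpProjectsAboveJoinRule.BOTH_PROJECT);
    planner.addRule(HivePushJoinThroughJoinRule.LEFT);
    planner.addRule(HivePushJoinThroughJoinRule.RIGHT);
    planner.setRoot(rootRel);
    RelNode optimized = planner.findBestExp();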
Added: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/reloperators/HiveAggregateRel.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/reloperators/HiveAggregateRel.java?rev=1605013&view=auto
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/reloperators/HiveAggregateRel.java (added)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/reloperators/HiveAggregateRel.java Tue Jun 24 06:32:30 2014
@@ -0,0 +1,58 @@
+package org.apache.hadoop.hive.ql.optimizer.optiq.reloperators;
+
+import java.util.BitSet;
+import java.util.List;
+
+import net.hydromatic.optiq.util.BitSets;
+
+import org.apache.hadoop.hive.ql.optimizer.optiq.TraitsUtil;
+import org.apache.hadoop.hive.ql.optimizer.optiq.cost.HiveCost;
+import org.eigenbase.rel.AggregateCall;
+import org.eigenbase.rel.AggregateRelBase;
+import org.eigenbase.rel.InvalidRelException;
+import org.eigenbase.rel.RelNode;
+import org.eigenbase.rel.metadata.RelMetadataQuery;
+import org.eigenbase.relopt.RelOptCluster;
+import org.eigenbase.relopt.RelOptCost;
+import org.eigenbase.relopt.RelOptPlanner;
+import org.eigenbase.relopt.RelTraitSet;
+
+public class HiveAggregateRel extends AggregateRelBase implements HiveRel {
+
+  public HiveAggregateRel(RelOptCluster cluster, RelTraitSet traitSet, RelNode child,
+      BitSet groupSet, List<AggregateCall> aggCalls) throws InvalidRelException {
+    super(cluster, TraitsUtil.getAggregateTraitSet(cluster, traitSet, BitSets.toList(groupSet),
+        aggCalls, child), child, groupSet, aggCalls);
+
+    for (AggregateCall aggCall : aggCalls) {
+      if (aggCall.isDistinct()) {
+        throw new InvalidRelException("distinct aggregation not supported");
+      }
+    }
+  }
+
+  public AggregateRelBase copy(RelTraitSet traitSet, RelNode input, BitSet groupSet,
+      List<AggregateCall> aggCalls) {
+    try {
+      return new HiveAggregateRel(getCluster(), traitSet, input, groupSet, aggCalls);
+    } catch (InvalidRelException e) {
+      // Semantic error not possible. Must be a bug. Convert to
+      // internal error.
+      throw new AssertionError(e);
+    }
+  }
+
+  public void implement(Implementor implementor) {
+  }
+
+  @Override
+  public RelOptCost computeSelfCost(RelOptPlanner planner) {
+    return HiveCost.FACTORY.makeZeroCost();
+  }
+
+  @Override
+  public double getRows() {
+    return RelMetadataQuery.getDistinctRowCount(this, groupSet, getCluster().getRexBuilder()
+        .makeLiteral(true));
+  }
+}

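A small sketch (assuming cluster, traitSet, child, groupSet and aggCallsWithDistinct are already in scope) of the behavior enforced by the constructor above: any DISTINCT aggregate call is rejected with an InvalidRelException.

    try {
      new HiveAggregateRel(cluster, traitSet, child, groupSet, aggCallsWithDistinct);
    } catch (InvalidRelException e) {
      // thrown because one of the AggregateCalls has isDistinct() == true
    }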
Added: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/reloperators/HiveFilterRel.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/reloperators/HiveFilterRel.java?rev=1605013&view=auto
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/reloperators/HiveFilterRel.java (added)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/reloperators/HiveFilterRel.java Tue Jun 24 06:32:30 2014
@@ -0,0 +1,35 @@
+package org.apache.hadoop.hive.ql.optimizer.optiq.reloperators;
+
+import java.util.List;
+
+import org.apache.hadoop.hive.ql.optimizer.optiq.TraitsUtil;
+import org.apache.hadoop.hive.ql.optimizer.optiq.cost.HiveCost;
+import org.eigenbase.rel.FilterRelBase;
+import org.eigenbase.rel.RelNode;
+import org.eigenbase.relopt.RelOptCluster;
+import org.eigenbase.relopt.RelOptCost;
+import org.eigenbase.relopt.RelOptPlanner;
+import org.eigenbase.relopt.RelTraitSet;
+import org.eigenbase.rex.RexNode;
+
+public class HiveFilterRel extends FilterRelBase implements HiveRel {
+
+  public HiveFilterRel(RelOptCluster cluster, RelTraitSet traits, RelNode child, RexNode condition) {
+    super(cluster, TraitsUtil.getFilterTraitSet(cluster, traits, child), child, condition);
+  }
+
+  @Override
+  public RelNode copy(RelTraitSet traitSet, List<RelNode> inputs) {
+    assert traitSet.containsIfApplicable(HiveRel.CONVENTION);
+    return new HiveFilterRel(getCluster(), traitSet, sole(inputs), getCondition());
+  }
+
+  @Override
+  public void implement(Implementor implementor) {
+  }
+
+  @Override
+  public RelOptCost computeSelfCost(RelOptPlanner planner) {
+    return HiveCost.FACTORY.makeZeroCost();
+  }
+}

Added: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/reloperators/HiveJoinRel.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/reloperators/HiveJoinRel.java?rev=1605013&view=auto
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/reloperators/HiveJoinRel.java (added)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/reloperators/HiveJoinRel.java Tue Jun 24 06:32:30 2014
@@ -0,0 +1,100 @@
+package org.apache.hadoop.hive.ql.optimizer.optiq.reloperators;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Set;
+
+import org.apache.hadoop.hive.ql.optimizer.optiq.TraitsUtil;
+import org.apache.hadoop.hive.ql.optimizer.optiq.cost.HiveCostUtil;
+import org.eigenbase.rel.InvalidRelException;
+import org.eigenbase.rel.JoinRelBase;
+import org.eigenbase.rel.JoinRelType;
+import org.eigenbase.rel.RelNode;
+import org.eigenbase.relopt.RelOptCluster;
+import org.eigenbase.relopt.RelOptCost;
+import org.eigenbase.relopt.RelOptPlanner;
+import org.eigenbase.relopt.RelOptUtil;
+import org.eigenbase.relopt.RelTraitSet;
+import org.eigenbase.rex.RexNode;
+
+//TODO: Should we convert MultiJoin to be a child of HiveJoinRelBase
+public class HiveJoinRel extends JoinRelBase implements HiveRel {
+  // NOTE: COMMON_JOIN & SMB_JOIN are sort-merge joins (for COMMON_JOIN each
+  // parallel computation handles multiple splits, whereas for SMB_JOIN each
+  // parallel computation handles one bucket). MAP_JOIN and BUCKET_JOIN are
+  // hash joins, where MAP_JOIN keeps the whole data set of the non-streaming
+  // tables in memory, whereas BUCKET_JOIN keeps only the matching buckets of
+  // the non-streaming tables in memory.
+  public enum JoinAlgorithm {
+    NONE, COMMON_JOIN, MAP_JOIN, BUCKET_JOIN, SMB_JOIN
+  }
+
+  public enum MapJoinStreamingRelation {
+    NONE, LEFT_RELATION, RIGHT_RELATION
+  }
+
+  private final JoinAlgorithm      m_joinAlgorithm;
+  private MapJoinStreamingRelation m_mapJoinStreamingSide = MapJoinStreamingRelation.NONE;
+
+  public static HiveJoinRel getJoin(RelOptCluster cluster, RelNode left, RelNode right,
+      RexNode condition, JoinRelType joinType) {
+    try {
+      Set<String> variablesStopped = Collections.emptySet();
+      return new HiveJoinRel(cluster, null, left, right, condition, joinType, variablesStopped,
+          JoinAlgorithm.NONE, null);
+    } catch (InvalidRelException e) {
+      throw new RuntimeException(e);
+    }
+  }
+
+  protected HiveJoinRel(RelOptCluster cluster, RelTraitSet traits, RelNode left, RelNode right,
+      RexNode condition, JoinRelType joinType, Set<String> variablesStopped,
+      JoinAlgorithm joinAlgo, MapJoinStreamingRelation streamingSideForMapJoin)
+      throws InvalidRelException {
+    super(cluster, TraitsUtil.getJoinTraitSet(cluster, traits), left, right, condition, joinType,
+        variablesStopped);
+
+    final List<RexNode> leftKeys = new ArrayList<RexNode>();
+    final List<RexNode> rightKeys = new ArrayList<RexNode>();
+    List<Integer> filterNulls = new LinkedList<Integer>();
+    RexNode remaining = RelOptUtil.splitJoinCondition(getSystemFieldList(), left, right, condition,
+        leftKeys, rightKeys, filterNulls, null);
+
+    if (!remaining.isAlwaysTrue()) {
+      throw new InvalidRelException("HiveJoinRel only supports equi-join");
+    }
+    this.m_joinAlgorithm = joinAlgo;
+  }
+
+  @Override
+  public void implement(Implementor implementor) {
+  }
+
+  @Override
+  public final HiveJoinRel copy(RelTraitSet traitSet, RexNode conditionExpr, RelNode left,
+      RelNode right, JoinRelType joinType) {
+    return copy(traitSet, conditionExpr, left, right, m_joinAlgorithm, m_mapJoinStreamingSide);
+  }
+
+  public HiveJoinRel copy(RelTraitSet traitSet, RexNode conditionExpr, RelNode left, RelNode right,
+      JoinAlgorithm joinalgo, MapJoinStreamingRelation streamingSide) {
+    try {
+      return new HiveJoinRel(getCluster(), traitSet, left, right, conditionExpr, joinType,
+          variablesStopped, joinalgo, streamingSide);
+    } catch (InvalidRelException e) {
+      // Semantic error not possible. Must be a bug. Convert to
+      // internal error.
+      throw new AssertionError(e);
+    }
+  }
+
+  public JoinAlgorithm getJoinAlgorithm() {
+    return m_joinAlgorithm;
+  }
+
+  @Override
+  public RelOptCost computeSelfCost(RelOptPlanner planner) {
+    return HiveCostUtil.computCardinalityBasedCost(this);
+  }
+}

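A sketch, assuming join is an existing HiveJoinRel still tagged with JoinAlgorithm.NONE, of how the extended copy() attaches a physical join algorithm and a streaming side:

    HiveJoinRel mapJoin = join.copy(join.getTraitSet(), join.getCondition(),
        join.getLeft(), join.getRight(),
        HiveJoinRel.JoinAlgorithm.MAP_JOIN,
        HiveJoinRel.MapJoinStreamingRelation.LEFT_RELATION);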
Added: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/reloperators/HiveLimitRel.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/reloperators/HiveLimitRel.java?rev=1605013&view=auto
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/reloperators/HiveLimitRel.java (added)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/reloperators/HiveLimitRel.java Tue Jun 24 06:32:30 2014
@@ -0,0 +1,40 @@
+package org.apache.hadoop.hive.ql.optimizer.optiq.reloperators;
+
+import java.util.List;
+
+import org.apache.hadoop.hive.ql.optimizer.optiq.TraitsUtil;
+import org.apache.hadoop.hive.ql.optimizer.optiq.cost.HiveCost;
+import org.eigenbase.rel.RelNode;
+import org.eigenbase.rel.SingleRel;
+import org.eigenbase.relopt.RelOptCluster;
+import org.eigenbase.relopt.RelOptCost;
+import org.eigenbase.relopt.RelOptPlanner;
+import org.eigenbase.relopt.RelTraitSet;
+import org.eigenbase.rex.RexNode;
+
+public class HiveLimitRel extends SingleRel implements HiveRel {
+  private final RexNode offset;
+  private final RexNode fetch;
+
+  HiveLimitRel(RelOptCluster cluster, RelTraitSet traitSet, RelNode child, RexNode offset,
+      RexNode fetch) {
+    super(cluster, TraitsUtil.getLimitTraitSet(cluster, traitSet, child), child);
+    this.offset = offset;
+    this.fetch = fetch;
+    assert getConvention() == HiveRel.CONVENTION;
+    assert getConvention() == child.getConvention();
+  }
+
+  @Override
+  public HiveLimitRel copy(RelTraitSet traitSet, List<RelNode> newInputs) {
+    return new HiveLimitRel(getCluster(), traitSet, sole(newInputs), offset, fetch);
+  }
+
+  public void implement(Implementor implementor) {
+  }
+
+  @Override
+  public RelOptCost computeSelfCost(RelOptPlanner planner) {
+    return HiveCost.FACTORY.makeZeroCost();
+  }
+}

Added: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/reloperators/HiveProjectRel.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/reloperators/HiveProjectRel.java?rev=1605013&view=auto
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/reloperators/HiveProjectRel.java (added)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/reloperators/HiveProjectRel.java Tue Jun 24 06:32:30 2014
@@ -0,0 +1,108 @@
+package org.apache.hadoop.hive.ql.optimizer.optiq.reloperators;
+
+import java.util.Collections;
+import java.util.List;
+
+import com.google.common.collect.ImmutableList;
+
+import org.apache.hadoop.hive.ql.optimizer.optiq.HiveOptiqUtil;
+import org.apache.hadoop.hive.ql.optimizer.optiq.TraitsUtil;
+import org.apache.hadoop.hive.ql.optimizer.optiq.cost.HiveCost;
+import org.eigenbase.rel.ProjectRelBase;
+import org.eigenbase.rel.RelCollation;
+import org.eigenbase.rel.RelFactories.ProjectFactory;
+import org.eigenbase.rel.RelNode;
+import org.eigenbase.relopt.RelOptCluster;
+import org.eigenbase.relopt.RelOptCost;
+import org.eigenbase.relopt.RelOptPlanner;
+import org.eigenbase.relopt.RelOptRule;
+import org.eigenbase.relopt.RelTraitSet;
+import org.eigenbase.reltype.RelDataType;
+import org.eigenbase.rex.RexNode;
+import org.eigenbase.rex.RexUtil;
+
+public class HiveProjectRel extends ProjectRelBase implements HiveRel {
+
+  public static final ProjectFactory DEFAULT_PROJECT_FACTORY = new HiveProjectFactoryImpl();
+
+  private final List<Integer>        m_virtualCols;
+
+  /**
+   * Creates a HiveProjectRel.
+   * 
+   * @param cluster
+   *          Cluster this relational expression belongs to
+   * @param child
+   *          input relational expression
+   * @param exps
+   *          List of expressions for the input columns
+   * @param rowType
+   *          output row type
+   * @param flags
+   *          values as in {@link ProjectRelBase.Flags}
+   */
+  public HiveProjectRel(RelOptCluster cluster, RelTraitSet traitSet, RelNode child,
+      List<RexNode> exps, RelDataType rowType, int flags) {
+    super(cluster, traitSet, child, exps, rowType, flags);
+    m_virtualCols = ImmutableList.copyOf(HiveOptiqUtil.getVirtualCols(exps));
+  }
+
+  /**
+   * Creates a HiveProjectRel with no sort keys.
+   * 
+   * @param child
+   *          input relational expression
+   * @param exps
+   *          set of expressions for the input columns
+   * @param fieldNames
+   *          aliases of the expressions
+   */
+  public static HiveProjectRel create(RelNode child, List<RexNode> exps, List<String> fieldNames) {
+    RelOptCluster cluster = child.getCluster();
+    RelDataType rowType = RexUtil.createStructType(cluster.getTypeFactory(), exps, fieldNames);
+    return create(cluster, child, exps, rowType, Collections.<RelCollation> emptyList());
+  }
+
+  /**
+   * Creates a HiveProjectRel.
+   */
+  public static HiveProjectRel create(RelOptCluster cluster, RelNode child, List<RexNode> exps,
+      RelDataType rowType, final List<RelCollation> collationList) {
+    RelTraitSet traitSet = TraitsUtil.getSelectTraitSet(cluster, exps, child);
+    return new HiveProjectRel(cluster, traitSet, child, exps, rowType, Flags.BOXED);
+  }
+
+  public ProjectRelBase copy(RelTraitSet traitSet, RelNode input, List<RexNode> exps,
+      RelDataType rowType) {
+    assert traitSet.containsIfApplicable(HiveRel.CONVENTION);
+    return new HiveProjectRel(getCluster(), traitSet, input, exps, rowType, getFlags());
+  }
+
+  @Override
+  public RelOptCost computeSelfCost(RelOptPlanner planner) {
+    return HiveCost.FACTORY.makeZeroCost();
+  }
+
+  public void implement(Implementor implementor) {
+  }
+
+  public List<Integer> getVirtualCols() {
+    return m_virtualCols;
+  }
+
+  /**
+   * Implementation of {@link ProjectFactory} that returns
+   * {@link org.apache.hadoop.hive.ql.optimizer.optiq.reloperators.HiveProjectRel}
+   * .
+   */
+  private static class HiveProjectFactoryImpl implements ProjectFactory {
+    @Override
+    public RelNode createProject(RelNode input, List<RexNode> exps, List<String> fieldNames) {
+      RelNode project = HiveProjectRel.create(input, exps, fieldNames);
+
+      // Make sure extra traits are carried over from the original rel
+      project = RelOptRule.convert(project, input.getTraitSet());
+      return project;
+    }
+  }
+}

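A brief sketch, assuming input, exps and fieldNames are already in scope, of the two ways a HiveProjectRel is obtained; the factory path is the one used by the project-related rules later in this patch:

    HiveProjectRel p1 = HiveProjectRel.create(input, exps, fieldNames);
    RelNode p2 = HiveProjectRel.DEFAULT_PROJECT_FACTORY.createProject(input, exps, fieldNames);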
Added: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/reloperators/HiveRel.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/reloperators/HiveRel.java?rev=1605013&view=auto
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/reloperators/HiveRel.java (added)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/reloperators/HiveRel.java Tue Jun 24 06:32:30 2014
@@ -0,0 +1,19 @@
+package org.apache.hadoop.hive.ql.optimizer.optiq.reloperators;
+
+import org.eigenbase.rel.RelNode;
+import org.eigenbase.relopt.Convention;
+
+public interface HiveRel extends RelNode {
+  void implement(Implementor implementor);
+
+  /** Calling convention for relational operations that occur in Hive. */
+  final Convention CONVENTION = new Convention.Impl("HIVE", HiveRel.class);
+
+  class Implementor {
+
+    public void visitChild(int ordinal, RelNode input) {
+      assert ordinal == 0;
+      ((HiveRel) input).implement(this);
+    }
+  }
+}

Added: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/reloperators/HiveSortRel.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/reloperators/HiveSortRel.java?rev=1605013&view=auto
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/reloperators/HiveSortRel.java (added)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/reloperators/HiveSortRel.java Tue Jun 24 06:32:30 2014
@@ -0,0 +1,37 @@
+package org.apache.hadoop.hive.ql.optimizer.optiq.reloperators;
+
+import org.apache.hadoop.hive.ql.optimizer.optiq.TraitsUtil;
+import org.eigenbase.rel.RelCollation;
+import org.eigenbase.rel.RelCollationImpl;
+import org.eigenbase.rel.RelNode;
+import org.eigenbase.rel.SortRel;
+import org.eigenbase.relopt.RelOptCluster;
+import org.eigenbase.relopt.RelTraitSet;
+import org.eigenbase.rex.RexNode;
+
+public class HiveSortRel extends SortRel implements HiveRel {
+
+  public HiveSortRel(RelOptCluster cluster, RelTraitSet traitSet, RelNode child,
+      RelCollation collation, RexNode offset, RexNode fetch) {
+    super(cluster, TraitsUtil.getSortTraitSet(cluster, traitSet, collation), child, collation,
+        offset, fetch);
+
+    assert getConvention() == child.getConvention();
+  }
+
+  @Override
+  public HiveSortRel copy(RelTraitSet traitSet, RelNode newInput, RelCollation newCollation,
+      RexNode offset, RexNode fetch) {
+    // TODO: can we blindly copy sort trait? What if inputs changed and we
+    // are now sorting by different cols
+    RelCollation canonizedCollation = traitSet.canonize(newCollation);
+    return new HiveSortRel(getCluster(), traitSet, newInput, canonizedCollation, offset, fetch);
+  }
+
+  public RexNode getFetchExpr() {
+    return fetch;
+  }
+
+  public void implement(Implementor implementor) {
+  }
+}

Added: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/reloperators/HiveTableScanRel.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/reloperators/HiveTableScanRel.java?rev=1605013&view=auto
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/reloperators/HiveTableScanRel.java (added)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/reloperators/HiveTableScanRel.java Tue Jun 24 06:32:30 2014
@@ -0,0 +1,94 @@
+package org.apache.hadoop.hive.ql.optimizer.optiq.reloperators;
+
+import java.util.LinkedList;
+import java.util.List;
+
+import org.apache.hadoop.hive.ql.optimizer.optiq.RelOptHiveTable;
+import org.apache.hadoop.hive.ql.optimizer.optiq.TraitsUtil;
+import org.apache.hadoop.hive.ql.optimizer.optiq.cost.HiveCost;
+import org.apache.hadoop.hive.ql.plan.ColStatistics;
+import org.eigenbase.rel.RelNode;
+import org.eigenbase.rel.TableAccessRelBase;
+import org.eigenbase.relopt.RelOptCluster;
+import org.eigenbase.relopt.RelOptCost;
+import org.eigenbase.relopt.RelOptPlanner;
+import org.eigenbase.relopt.RelTraitSet;
+import org.eigenbase.reltype.RelDataType;
+
+import com.google.common.collect.ImmutableList;
+
+/**
+ * Relational expression representing a scan of a Hive table.
+ */
+public class HiveTableScanRel extends TableAccessRelBase implements HiveRel {
+  private final ImmutableList<ColStatistics> m_hiveColStat;
+
+  /**
+   * Creates a HiveTableScanRel.
+   *
+   * @param cluster
+   *          Cluster
+   * @param traitSet
+   *          Traits
+   * @param table
+   *          Hive table
+   * @param rowtype
+   *          Row type of the scan
+   */
+  public HiveTableScanRel(RelOptCluster cluster, RelTraitSet traitSet, RelOptHiveTable table,
+      RelDataType rowtype) {
+    super(cluster, TraitsUtil.getTableScanTraitSet(cluster, traitSet, table, rowtype), table);
+    assert getConvention() == HiveRel.CONVENTION;
+
+    ImmutableList.Builder<ColStatistics> b = new ImmutableList.Builder<ColStatistics>();
+    for (String fN : rowtype.getFieldNames()) {
+      ColStatistics cStat = table.getHiveStats().getColumnStatisticsForColumn(
+          table.getName(), fN);
+      b.add(cStat);
+    }
+    m_hiveColStat = b.build();
+  }
+
+  @Override
+  public RelNode copy(RelTraitSet traitSet, List<RelNode> inputs) {
+    assert inputs.isEmpty();
+    return this;
+  }
+
+  @Override
+  public RelOptCost computeSelfCost(RelOptPlanner planner) {
+    return HiveCost.FACTORY.makeZeroCost();
+  }
+
+  @Override
+  public void register(RelOptPlanner planner) {
+
+  }
+
+  public void implement(Implementor implementor) {
+
+  }
+
+  @Override
+  public double getRows() {
+    return ((RelOptHiveTable) table).getRowCount();
+  }
+
+  public List<ColStatistics> getColStat(List<Integer> projIndxLst) {
+    if (projIndxLst != null) {
+      List<ColStatistics> hiveColStatLst = new LinkedList<ColStatistics>();
+      for (Integer i : projIndxLst) {
+        hiveColStatLst.add(m_hiveColStat.get(i));
+      }
+      return hiveColStatLst;
+    } else {
+      return m_hiveColStat;
+    }
+  }
+
+}
\ No newline at end of file

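A small usage sketch, assuming scan is an existing HiveTableScanRel: getColStat returns statistics for a projection given column ordinals, or for every column when passed null (ImmutableList here is Guava's, already imported in the file above).

    List<ColStatistics> projected = scan.getColStat(ImmutableList.of(0, 2)); // columns 0 and 2
    List<ColStatistics> all = scan.getColStat(null);                         // all columns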
Added: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/rules/HiveMergeProjectRule.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/rules/HiveMergeProjectRule.java?rev=1605013&view=auto
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/rules/HiveMergeProjectRule.java (added)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/rules/HiveMergeProjectRule.java Tue Jun 24 06:32:30 2014
@@ -0,0 +1,12 @@
+package org.apache.hadoop.hive.ql.optimizer.optiq.rules;
+
+import org.apache.hadoop.hive.ql.optimizer.optiq.reloperators.HiveProjectRel;
+import org.eigenbase.rel.rules.MergeProjectRule;
+
+public class HiveMergeProjectRule extends MergeProjectRule {
+  public static final HiveMergeProjectRule INSTANCE = new HiveMergeProjectRule();
+
+  public HiveMergeProjectRule() {
+    super(true, HiveProjectRel.DEFAULT_PROJECT_FACTORY);
+  }
+}

Added: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/rules/HivePullUpProjectsAboveJoinRule.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/rules/HivePullUpProjectsAboveJoinRule.java?rev=1605013&view=auto
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/rules/HivePullUpProjectsAboveJoinRule.java (added)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/rules/HivePullUpProjectsAboveJoinRule.java Tue Jun 24 06:32:30 2014
@@ -0,0 +1,44 @@
+package org.apache.hadoop.hive.ql.optimizer.optiq.rules;
+
+import org.apache.hadoop.hive.ql.optimizer.optiq.reloperators.HiveJoinRel;
+import org.apache.hadoop.hive.ql.optimizer.optiq.reloperators.HiveProjectRel;
+import org.eigenbase.rel.ProjectRelBase;
+import org.eigenbase.rel.RelNode;
+import org.eigenbase.rel.rules.PullUpProjectsAboveJoinRule;
+import org.eigenbase.relopt.RelOptRuleOperand;
+
+public class HivePullUpProjectsAboveJoinRule extends PullUpProjectsAboveJoinRule {
+
+  public static final HivePullUpProjectsAboveJoinRule BOTH_PROJECT =
+      new HivePullUpProjectsAboveJoinRule(
+          operand(HiveJoinRel.class,
+              operand(ProjectRelBase.class, any()),
+              operand(ProjectRelBase.class, any())),
+          "HivePullUpProjectsAboveJoinRule: with two HiveProjectRel children");
+
+  public static final HivePullUpProjectsAboveJoinRule LEFT_PROJECT =
+      new HivePullUpProjectsAboveJoinRule(
+          operand(HiveJoinRel.class,
+              some(operand(ProjectRelBase.class, any()))),
+          "HivePullUpProjectsAboveJoinRule: with HiveProjectRel on left");
+
+  public static final HivePullUpProjectsAboveJoinRule RIGHT_PROJECT =
+      new HivePullUpProjectsAboveJoinRule(
+          operand(HiveJoinRel.class,
+              operand(RelNode.class, any()),
+              operand(ProjectRelBase.class, any())),
+          "HivePullUpProjectsAboveJoinRule: with HiveProjectRel on right");
+
+  public HivePullUpProjectsAboveJoinRule(RelOptRuleOperand operand, String description) {
+    super(operand, description, HiveProjectRel.DEFAULT_PROJECT_FACTORY);
+  }
+}

Added: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/rules/HivePushJoinThroughJoinRule.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/rules/HivePushJoinThroughJoinRule.java?rev=1605013&view=auto
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/rules/HivePushJoinThroughJoinRule.java (added)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/rules/HivePushJoinThroughJoinRule.java Tue Jun 24 06:32:30 2014
@@ -0,0 +1,37 @@
+package org.apache.hadoop.hive.ql.optimizer.optiq.rules;
+
+import org.apache.hadoop.hive.ql.optimizer.optiq.reloperators.HiveJoinRel;
+import org.apache.hadoop.hive.ql.optimizer.optiq.reloperators.HiveJoinRel.JoinAlgorithm;
+import org.apache.hadoop.hive.ql.optimizer.optiq.reloperators.HiveProjectRel;
+import org.eigenbase.rel.JoinRelBase;
+import org.eigenbase.rel.rules.PushJoinThroughJoinRule;
+import org.eigenbase.relopt.RelOptRule;
+import org.eigenbase.relopt.RelOptRuleCall;
+
+public class HivePushJoinThroughJoinRule extends PushJoinThroughJoinRule {
+  public static final RelOptRule RIGHT = new HivePushJoinThroughJoinRule(
+                                           "Hive PushJoinThroughJoinRule:right", true,
+                                           HiveJoinRel.class);
+  public static final RelOptRule LEFT  = new HivePushJoinThroughJoinRule(
+                                           "Hive PushJoinThroughJoinRule:left", false,
+                                           HiveJoinRel.class);
+
+  private HivePushJoinThroughJoinRule(String description, boolean right,
+      Class<? extends JoinRelBase> clazz) {
+    super(description, right, clazz, HiveProjectRel.DEFAULT_PROJECT_FACTORY);
+  }
+
+  @Override
+  public boolean matches(RelOptRuleCall call) {
+    boolean isAMatch = false;
+    final HiveJoinRel topJoin = call.rel(0);
+    final HiveJoinRel bottomJoin = call.rel(1);
+
+    if (topJoin.getJoinAlgorithm() == JoinAlgorithm.NONE
+        && bottomJoin.getJoinAlgorithm() == JoinAlgorithm.NONE) {
+      isAMatch = true;
+    }
+
+    return isAMatch;
+  }
+}