Posted to commits@impala.apache.org by kw...@apache.org on 2016/09/30 02:14:27 UTC

[10/61] [partial] incubator-impala git commit: IMPALA-3786: Replace "cloudera" with "apache" (part 1)

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/util/TSessionStateUtil.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/util/TSessionStateUtil.java b/fe/src/main/java/com/cloudera/impala/util/TSessionStateUtil.java
deleted file mode 100644
index f055a58..0000000
--- a/fe/src/main/java/com/cloudera/impala/util/TSessionStateUtil.java
+++ /dev/null
@@ -1,38 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.util;
-
-import com.cloudera.impala.thrift.TSessionState;
-
-/**
- * Utility functions for working with TSessionState objects.
- */
-public class TSessionStateUtil {
-  /**
-   * Returns the actual user to perform authorisation against for the provided
-   * session. That is, returns the delegated user for a session if set, otherwise
-   * returns the connected user.
-   */
-  public static String getEffectiveUser(TSessionState session) {
-    if (session.getDelegated_user() != null &&
-        !session.getDelegated_user().isEmpty()) {
-      return session.getDelegated_user();
-    }
-    return session.getConnected_user();
-  }
-}

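For reference, a minimal usage sketch of the getEffectiveUser() fallback above
(illustration only, not part of this commit; the Thrift-generated setters are
assumed to mirror the getters used in the class):

// Hypothetical usage sketch: exercises the delegation fallback in
// TSessionStateUtil.getEffectiveUser(). setConnected_user()/setDelegated_user()
// are assumed Thrift-generated setters matching the getters above.
import com.cloudera.impala.thrift.TSessionState;
import com.cloudera.impala.util.TSessionStateUtil;

public class EffectiveUserExample {
  public static void main(String[] args) {
    TSessionState session = new TSessionState();
    session.setConnected_user("hue");    // the user the client connected as
    // No delegated user set: authorization runs as the connected user.
    System.out.println(TSessionStateUtil.getEffectiveUser(session));  // hue
    session.setDelegated_user("alice");  // proxy (doAs) user
    // A non-empty delegated user takes precedence for authorization.
    System.out.println(TSessionStateUtil.getEffectiveUser(session));  // alice
  }
}
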
http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/util/UnsafeUtil.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/util/UnsafeUtil.java b/fe/src/main/java/com/cloudera/impala/util/UnsafeUtil.java
deleted file mode 100644
index f0c69c5..0000000
--- a/fe/src/main/java/com/cloudera/impala/util/UnsafeUtil.java
+++ /dev/null
@@ -1,68 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.util;
-
-import java.lang.reflect.Field;
-import java.security.AccessController;
-import java.security.PrivilegedAction;
-
-import sun.misc.Unsafe;
-
-@SuppressWarnings("restriction")
-/**
- * Utility class to access unsafe methods.
- */
-public class UnsafeUtil {
-  // object to allow us to use unsafe APIs. This lets us read native memory without
-  // copies and not have to switch back and forth between little endian and big endian.
-  public static final Unsafe UNSAFE;
-
-  // This is the offset to the start of the array data. (There's some bytes
-  // before the array data like the size and other java stuff).
-  private static final int BYTE_ARRAY_DATA_OFFSET;
-
-  static {
-    UNSAFE = (Unsafe) AccessController.doPrivileged(
-        new PrivilegedAction<Object>() {
-          @Override
-          public Object run() {
-            try {
-              Field f = Unsafe.class.getDeclaredField("theUnsafe");
-              f.setAccessible(true);
-              return f.get(null);
-            } catch (NoSuchFieldException e) {
-              throw new Error();
-            } catch (IllegalAccessException e) {
-              throw new Error();
-            }
-          }
-        });
-
-    BYTE_ARRAY_DATA_OFFSET = UNSAFE.arrayBaseOffset(byte[].class);
-  }
-
-  // Copies src[srcOffset, srcOffset + len) into dst.
-  public static void Copy(long dst, byte[] src, int srcOffset, int len) {
-    UNSAFE.copyMemory(src, BYTE_ARRAY_DATA_OFFSET + srcOffset, null, dst, len);
-  }
-
-  // Copies src[0, len) into dst[dstOffset, dstOffset + len).
-  public static void Copy(byte[] dst, int dstOffset, long src, int len) {
-    UNSAFE.copyMemory(null, src, dst, dstOffset + BYTE_ARRAY_DATA_OFFSET, len);
-  }
-}

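A short round-trip sketch for the two Copy() overloads above (illustration
only, not part of this commit; allocateMemory()/freeMemory() are standard
sun.misc.Unsafe methods):

// Hypothetical round-trip through native memory using UnsafeUtil.Copy().
import java.util.Arrays;

import com.cloudera.impala.util.UnsafeUtil;

public class UnsafeCopyExample {
  public static void main(String[] args) {
    byte[] src = {1, 2, 3, 4};
    byte[] dst = new byte[src.length];
    // Allocate off-heap memory to copy through.
    long addr = UnsafeUtil.UNSAFE.allocateMemory(src.length);
    try {
      UnsafeUtil.Copy(addr, src, 0, src.length);    // heap -> native
      UnsafeUtil.Copy(dst, 0, addr, src.length);    // native -> heap
      System.out.println(Arrays.equals(src, dst));  // true
    } finally {
      UnsafeUtil.UNSAFE.freeMemory(addr);  // always release native memory
    }
  }
}
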
http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/util/Visitor.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/util/Visitor.java b/fe/src/main/java/com/cloudera/impala/util/Visitor.java
deleted file mode 100644
index ac7b8e3..0000000
--- a/fe/src/main/java/com/cloudera/impala/util/Visitor.java
+++ /dev/null
@@ -1,27 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.util;
-
-/**
-* Interface for visitor pattern
-*/
-public interface Visitor<T> {
-
-  void visit(T a);
-
-}
\ No newline at end of file

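A minimal implementation sketch for the interface above (illustration only;
the NameCollector class is hypothetical):

import java.util.ArrayList;
import java.util.List;

import com.cloudera.impala.util.Visitor;

// Hypothetical Visitor<T> implementation that records every visited value.
public class NameCollector implements Visitor<String> {
  private final List<String> seen = new ArrayList<String>();

  @Override
  public void visit(String a) { seen.add(a); }

  public List<String> getSeen() { return seen; }
}
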
http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/org/apache/impala/analysis/AggregateInfo.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/AggregateInfo.java b/fe/src/main/java/org/apache/impala/analysis/AggregateInfo.java
new file mode 100644
index 0000000..e8e9445
--- /dev/null
+++ b/fe/src/main/java/org/apache/impala/analysis/AggregateInfo.java
@@ -0,0 +1,742 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package com.cloudera.impala.analysis;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.cloudera.impala.catalog.Type;
+import com.cloudera.impala.common.AnalysisException;
+import com.cloudera.impala.common.InternalException;
+import com.cloudera.impala.planner.DataPartition;
+import com.cloudera.impala.thrift.TPartitionType;
+import com.google.common.base.Objects;
+import com.google.common.base.Preconditions;
+import com.google.common.collect.Lists;
+
+/**
+ * Encapsulates all the information needed to compute the aggregate functions of a single
+ * Select block, including a possible 2nd phase aggregation step for DISTINCT aggregate
+ * functions and merge aggregation steps needed for distributed execution.
+ *
+ * The latter requires a tree structure of AggregateInfo objects which express the
+ * original aggregate computations as well as the necessary merging aggregate
+ * computations.
+ * TODO: get rid of this by transforming
+ *   SELECT COUNT(DISTINCT a, b, ..) GROUP BY x, y, ...
+ * into an equivalent query with an inline view:
+ *   SELECT COUNT(*) FROM (SELECT DISTINCT a, b, ..., x, y, ...) GROUP BY x, y, ...
+ *
+ * The tree structure looks as follows:
+ * - for non-distinct aggregation:
+ *   - aggInfo: contains the original aggregation functions and grouping exprs
+ *   - aggInfo.mergeAggInfo: contains the merging aggregation functions (grouping
+ *     exprs are identical)
+ * - for distinct aggregation (for an explanation of the phases, see
+ *   SelectStmt.createDistinctAggInfo()):
+ *   - aggInfo: contains the phase 1 aggregate functions and grouping exprs
+ *   - aggInfo.2ndPhaseDistinctAggInfo: contains the phase 2 aggregate functions and
+ *     grouping exprs
+ *   - aggInfo.mergeAggInfo: contains the merging aggregate functions for the phase 1
+ *     computation (grouping exprs are identical)
+ *   - aggInfo.2ndPhaseDistinctAggInfo.mergeAggInfo: contains the merging aggregate
+ *     functions for the phase 2 computation (grouping exprs are identical)
+ *
+ * In general, merging aggregate computations are idempotent; in other words,
+ * aggInfo.mergeAggInfo == aggInfo.mergeAggInfo.mergeAggInfo.
+ *
+ * TODO: move the merge construction logic from SelectStmt into AggregateInfo
+ * TODO: Add query tests for aggregation with intermediate tuples with num_nodes=1.
+ */
+public class AggregateInfo extends AggregateInfoBase {
+  private final static Logger LOG = LoggerFactory.getLogger(AggregateInfo.class);
+
+  public enum AggPhase {
+    FIRST,
+    FIRST_MERGE,
+    SECOND,
+    SECOND_MERGE;
+
+    public boolean isMerge() { return this == FIRST_MERGE || this == SECOND_MERGE; }
+  };
+
+  // created by createMergeAggInfo()
+  private AggregateInfo mergeAggInfo_;
+
+  // created by createDistinctAggInfo()
+  private AggregateInfo secondPhaseDistinctAggInfo_;
+
+  private final AggPhase aggPhase_;
+
+  // Map from all grouping and aggregate exprs to a SlotRef referencing the corresp. slot
+  // in the intermediate tuple. Identical to outputTupleSmap_ if no aggregateExpr has an
+  // output type that is different from its intermediate type.
+  protected ExprSubstitutionMap intermediateTupleSmap_ = new ExprSubstitutionMap();
+
+  // Map from all grouping and aggregate exprs to a SlotRef referencing the corresp. slot
+  // in the output tuple.
+  protected ExprSubstitutionMap outputTupleSmap_ = new ExprSubstitutionMap();
+
+  // Map from slots of outputTupleSmap_ to the corresponding slot in
+  // intermediateTupleSmap_.
+  protected ExprSubstitutionMap outputToIntermediateTupleSmap_ =
+      new ExprSubstitutionMap();
+
+  // if set, a subset of groupingExprs_; set and used during planning
+  private List<Expr> partitionExprs_;
+
+  // C'tor creates copies of groupingExprs and aggExprs.
+  private AggregateInfo(ArrayList<Expr> groupingExprs,
+      ArrayList<FunctionCallExpr> aggExprs, AggPhase aggPhase)  {
+    super(groupingExprs, aggExprs);
+    aggPhase_ = aggPhase;
+  }
+
+  /**
+   * C'tor for cloning.
+   */
+  private AggregateInfo(AggregateInfo other) {
+    super(other);
+    if (other.mergeAggInfo_ != null) {
+      mergeAggInfo_ = other.mergeAggInfo_.clone();
+    }
+    if (other.secondPhaseDistinctAggInfo_ != null) {
+      secondPhaseDistinctAggInfo_ = other.secondPhaseDistinctAggInfo_.clone();
+    }
+    aggPhase_ = other.aggPhase_;
+    outputTupleSmap_ = other.outputTupleSmap_.clone();
+    if (other.requiresIntermediateTuple()) {
+      intermediateTupleSmap_ = other.intermediateTupleSmap_.clone();
+    } else {
+      Preconditions.checkState(other.intermediateTupleDesc_ == other.outputTupleDesc_);
+      intermediateTupleSmap_ = outputTupleSmap_;
+    }
+    partitionExprs_ =
+        (other.partitionExprs_ != null) ? Expr.cloneList(other.partitionExprs_) : null;
+  }
+
+  public List<Expr> getPartitionExprs() { return partitionExprs_; }
+  public void setPartitionExprs(List<Expr> exprs) { partitionExprs_ = exprs; }
+
+  /**
+   * Creates complete AggregateInfo for groupingExprs and aggExprs, including
+   * aggTupleDesc and aggTupleSMap. If parameter tupleDesc != null, sets aggTupleDesc to
+   * that instead of creating a new descriptor (after verifying that the passed-in
+   * descriptor is correct for the given aggregation).
+   * Also creates mergeAggInfo and secondPhaseDistinctAggInfo, if needed.
+   * If an aggTupleDesc is created, also registers eq predicates between the
+   * grouping exprs and their respective slots with 'analyzer'.
+   */
+  public static AggregateInfo create(
+      ArrayList<Expr> groupingExprs, ArrayList<FunctionCallExpr> aggExprs,
+      TupleDescriptor tupleDesc, Analyzer analyzer)
+          throws AnalysisException {
+    Preconditions.checkState(
+        (groupingExprs != null && !groupingExprs.isEmpty())
+        || (aggExprs != null && !aggExprs.isEmpty()));
+    Expr.removeDuplicates(groupingExprs);
+    Expr.removeDuplicates(aggExprs);
+    AggregateInfo result = new AggregateInfo(groupingExprs, aggExprs, AggPhase.FIRST);
+
+    // collect agg exprs with DISTINCT clause
+    ArrayList<FunctionCallExpr> distinctAggExprs = Lists.newArrayList();
+    if (aggExprs != null) {
+      for (FunctionCallExpr aggExpr: aggExprs) {
+        if (aggExpr.isDistinct()) distinctAggExprs.add(aggExpr);
+      }
+    }
+
+    if (distinctAggExprs.isEmpty()) {
+      if (tupleDesc == null) {
+        result.createTupleDescs(analyzer);
+        result.createSmaps(analyzer);
+      } else {
+        // A tupleDesc should only be given for UNION DISTINCT.
+        Preconditions.checkState(aggExprs == null);
+        result.outputTupleDesc_ = tupleDesc;
+        result.intermediateTupleDesc_ = tupleDesc;
+      }
+      result.createMergeAggInfo(analyzer);
+    } else {
+      // we don't allow you to pass in a descriptor for distinct aggregation
+      // (we need two descriptors)
+      Preconditions.checkState(tupleDesc == null);
+      result.createDistinctAggInfo(groupingExprs, distinctAggExprs, analyzer);
+    }
+    LOG.debug("agg info:\n" + result.debugString());
+    return result;
+  }
+
+  /**
+   * Create aggregate info for select block containing aggregate exprs with
+   * DISTINCT clause.
+   * This creates:
+   * - aggTupleDesc
+   * - a complete secondPhaseDistinctAggInfo
+   * - mergeAggInfo
+   *
+   * At the moment, we require that all distinct aggregate
+   * functions be applied to the same set of exprs (i.e., we can't do something
+   * like SELECT COUNT(DISTINCT id), COUNT(DISTINCT address)).
+   * Aggregation happens in two successive phases:
+   * - the first phase aggregates by all grouping exprs plus all parameter exprs
+   *   of DISTINCT aggregate functions
+   *
+   * Example:
+   *   SELECT a, COUNT(DISTINCT b, c), MIN(d), COUNT(*) FROM T GROUP BY a
+   * - 1st phase grouping exprs: a, b, c
+   * - 1st phase agg exprs: MIN(d), COUNT(*)
+   * - 2nd phase grouping exprs: a
+   * - 2nd phase agg exprs: COUNT(*), MIN(<MIN(d) from 1st phase>),
+   *     SUM(<COUNT(*) from 1st phase>)
+   *
+   * TODO: expand implementation to cover the general case; this will require
+   * a different execution strategy
+   */
+  private void createDistinctAggInfo(
+      ArrayList<Expr> origGroupingExprs,
+      ArrayList<FunctionCallExpr> distinctAggExprs, Analyzer analyzer)
+          throws AnalysisException {
+    Preconditions.checkState(!distinctAggExprs.isEmpty());
+    // make sure that all DISTINCT params are the same;
+    // ignore top-level implicit casts in the comparison, we might have inserted
+    // those during analysis
+    ArrayList<Expr> expr0Children = Lists.newArrayList();
+
+    if (distinctAggExprs.get(0).getFnName().getFunction().equalsIgnoreCase(
+        "group_concat")) {
+      // Ignore separator parameter, otherwise the same would have to be present for all
+      // other distinct aggregates as well.
+      // TODO: Deal with constant exprs more generally, instead of special-casing
+      // group_concat().
+      expr0Children.add(distinctAggExprs.get(0).getChild(0).ignoreImplicitCast());
+    } else {
+      for (Expr expr : distinctAggExprs.get(0).getChildren()) {
+        expr0Children.add(expr.ignoreImplicitCast());
+      }
+    }
+    for (int i = 1; i < distinctAggExprs.size(); ++i) {
+      ArrayList<Expr> exprIChildren = Lists.newArrayList();
+      if (distinctAggExprs.get(i).getFnName().getFunction().equalsIgnoreCase(
+          "group_concat")) {
+        exprIChildren.add(distinctAggExprs.get(i).getChild(0).ignoreImplicitCast());
+      } else {
+        for (Expr expr : distinctAggExprs.get(i).getChildren()) {
+          exprIChildren.add(expr.ignoreImplicitCast());
+        }
+      }
+      if (!Expr.equalLists(expr0Children, exprIChildren)) {
+        throw new AnalysisException(
+            "all DISTINCT aggregate functions need to have the same set of "
+            + "parameters as " + distinctAggExprs.get(0).toSql()
+            + "; deviating function: " + distinctAggExprs.get(i).toSql());
+      }
+    }
+
+    // add DISTINCT parameters to grouping exprs
+    groupingExprs_.addAll(expr0Children);
+
+    // remove DISTINCT aggregate functions from aggExprs
+    aggregateExprs_.removeAll(distinctAggExprs);
+
+    createTupleDescs(analyzer);
+    createSmaps(analyzer);
+    createMergeAggInfo(analyzer);
+    createSecondPhaseAggInfo(origGroupingExprs, distinctAggExprs, analyzer);
+  }
+
+  public AggregateInfo getMergeAggInfo() { return mergeAggInfo_; }
+  public AggregateInfo getSecondPhaseDistinctAggInfo() {
+    return secondPhaseDistinctAggInfo_;
+  }
+  public AggPhase getAggPhase() { return aggPhase_; }
+  public boolean isMerge() { return aggPhase_.isMerge(); }
+  public boolean isDistinctAgg() { return secondPhaseDistinctAggInfo_ != null; }
+  public ExprSubstitutionMap getIntermediateSmap() { return intermediateTupleSmap_; }
+  public ExprSubstitutionMap getOutputSmap() { return outputTupleSmap_; }
+  public ExprSubstitutionMap getOutputToIntermediateSmap() {
+    return outputToIntermediateTupleSmap_;
+  }
+
+  public boolean hasAggregateExprs() {
+    return !aggregateExprs_.isEmpty() ||
+        (secondPhaseDistinctAggInfo_ != null &&
+         !secondPhaseDistinctAggInfo_.getAggregateExprs().isEmpty());
+  }
+
+  /**
+   * Return the tuple id produced in the final aggregation step.
+   */
+  public TupleId getResultTupleId() {
+    if (isDistinctAgg()) return secondPhaseDistinctAggInfo_.getOutputTupleId();
+    return getOutputTupleId();
+  }
+
+  public ArrayList<FunctionCallExpr> getMaterializedAggregateExprs() {
+    ArrayList<FunctionCallExpr> result = Lists.newArrayList();
+    for (Integer i: materializedSlots_) {
+      result.add(aggregateExprs_.get(i));
+    }
+    return result;
+  }
+
+  /**
+   * Append ids of all slots that are being referenced in the process
+   * of performing the aggregate computation described by this AggregateInfo.
+   */
+  public void getRefdSlots(List<SlotId> ids) {
+    Preconditions.checkState(outputTupleDesc_ != null);
+    if (groupingExprs_ != null) {
+      Expr.getIds(groupingExprs_, null, ids);
+    }
+    Expr.getIds(aggregateExprs_, null, ids);
+    // The backend assumes that the entire aggTupleDesc is materialized
+    for (int i = 0; i < outputTupleDesc_.getSlots().size(); ++i) {
+      ids.add(outputTupleDesc_.getSlots().get(i).getId());
+    }
+  }
+
+  /**
+   * Substitute all the expressions (grouping expr, aggregate expr) and update our
+   * substitution map according to the given substitution map:
+   * - smap typically maps from tuple t1 to tuple t2 (example: the smap of an
+   *   inline view maps the virtual table ref t1 into a base table ref t2)
+   * - our grouping and aggregate exprs need to be substituted with the given
+   *   smap so that they also reference t2
+   * - aggTupleSMap needs to be recomputed to map exprs based on t2
+   *   onto our aggTupleDesc (ie, the left-hand side needs to be substituted with
+   *   smap)
+   * - mergeAggInfo: this is not affected, because
+   *   * its grouping and aggregate exprs only reference aggTupleDesc_
+   *   * its smap is identical to aggTupleSMap_
+   * - 2ndPhaseDistinctAggInfo:
+   *   * its grouping and aggregate exprs also only reference aggTupleDesc_
+   *     and are therefore not affected
+   *   * its smap needs to be recomputed to map exprs based on t2 to its own
+   *     aggTupleDesc
+   */
+  public void substitute(ExprSubstitutionMap smap, Analyzer analyzer)
+      throws InternalException {
+    groupingExprs_ = Expr.substituteList(groupingExprs_, smap, analyzer, false);
+    LOG.trace("AggInfo: grouping_exprs=" + Expr.debugString(groupingExprs_));
+
+    // The smap in this case should not substitute the aggs themselves, only
+    // their subexpressions.
+    List<Expr> substitutedAggs =
+        Expr.substituteList(aggregateExprs_, smap, analyzer, false);
+    aggregateExprs_.clear();
+    for (Expr substitutedAgg: substitutedAggs) {
+      aggregateExprs_.add((FunctionCallExpr) substitutedAgg);
+    }
+
+    LOG.trace("AggInfo: agg_exprs=" + Expr.debugString(aggregateExprs_));
+    outputTupleSmap_.substituteLhs(smap, analyzer);
+    intermediateTupleSmap_.substituteLhs(smap, analyzer);
+    if (secondPhaseDistinctAggInfo_ != null) {
+      secondPhaseDistinctAggInfo_.substitute(smap, analyzer);
+    }
+  }
+
+  /**
+   * Create the info for an aggregation node that merges its pre-aggregated inputs:
+   * - pre-aggregation is computed by 'this'
+   * - tuple desc and smap are the same as those of the input (we're materializing
+   *   the same logical tuple)
+   * - grouping exprs: slotrefs to the input's grouping slots
+   * - aggregate exprs: aggregation of the input's aggregateExprs slots
+   *
+   * The returned AggregateInfo shares its descriptor and smap with the input info;
+   * createAggTupleDesc() must not be called on it.
+   */
+  private void createMergeAggInfo(Analyzer analyzer) {
+    Preconditions.checkState(mergeAggInfo_ == null);
+    TupleDescriptor inputDesc = intermediateTupleDesc_;
+    // construct grouping exprs
+    ArrayList<Expr> groupingExprs = Lists.newArrayList();
+    for (int i = 0; i < getGroupingExprs().size(); ++i) {
+      SlotRef slotRef = new SlotRef(inputDesc.getSlots().get(i));
+      groupingExprs.add(slotRef);
+    }
+
+    // construct agg exprs
+    ArrayList<FunctionCallExpr> aggExprs = Lists.newArrayList();
+    for (int i = 0; i < getAggregateExprs().size(); ++i) {
+      FunctionCallExpr inputExpr = getAggregateExprs().get(i);
+      Preconditions.checkState(inputExpr.isAggregateFunction());
+      Expr aggExprParam =
+          new SlotRef(inputDesc.getSlots().get(i + getGroupingExprs().size()));
+      FunctionCallExpr aggExpr = FunctionCallExpr.createMergeAggCall(
+          inputExpr, Lists.newArrayList(aggExprParam));
+      aggExpr.analyzeNoThrow(analyzer);
+      aggExprs.add(aggExpr);
+    }
+
+    AggPhase aggPhase =
+        (aggPhase_ == AggPhase.FIRST) ? AggPhase.FIRST_MERGE : AggPhase.SECOND_MERGE;
+    mergeAggInfo_ = new AggregateInfo(groupingExprs, aggExprs, aggPhase);
+    mergeAggInfo_.intermediateTupleDesc_ = intermediateTupleDesc_;
+    mergeAggInfo_.outputTupleDesc_ = outputTupleDesc_;
+    mergeAggInfo_.intermediateTupleSmap_ = intermediateTupleSmap_;
+    mergeAggInfo_.outputTupleSmap_ = outputTupleSmap_;
+    mergeAggInfo_.materializedSlots_ = materializedSlots_;
+  }
+
+  /**
+   * Creates an IF function call that returns NULL if any of the slots
+   * at indexes [firstIdx, lastIdx] return NULL.
+   * For example, the resulting IF function would look like this for 3 slots:
+   * IF(IsNull(slot1), NULL, IF(IsNull(slot2), NULL, slot3))
+   * Returns null if firstIdx is greater than lastIdx.
+   * Returns a SlotRef to the last slot if there is only one slot in range.
+   */
+  private Expr createCountDistinctAggExprParam(int firstIdx, int lastIdx,
+      ArrayList<SlotDescriptor> slots) {
+    if (firstIdx > lastIdx) return null;
+
+    Expr elseExpr = new SlotRef(slots.get(lastIdx));
+    if (firstIdx == lastIdx) return elseExpr;
+
+    for (int i = lastIdx - 1; i >= firstIdx; --i) {
+      ArrayList<Expr> ifArgs = Lists.newArrayList();
+      SlotRef slotRef = new SlotRef(slots.get(i));
+      // Build expr: IF(IsNull(slotRef), NULL, elseExpr)
+      Expr isNullPred = new IsNullPredicate(slotRef, false);
+      ifArgs.add(isNullPred);
+      ifArgs.add(new NullLiteral());
+      ifArgs.add(elseExpr);
+      elseExpr = new FunctionCallExpr("if", ifArgs);
+    }
+    return elseExpr;
+  }
+
+  /**
+   * Create the info for an aggregation node that computes the second phase of
+   * DISTINCT aggregate functions.
+   * (Refer to createDistinctAggInfo() for an explanation of the phases.)
+   * - 'this' is the phase 1 aggregation
+   * - grouping exprs are those of the original query (param origGroupingExprs)
+   * - aggregate exprs for the DISTINCT agg fns: these are aggregating the grouping
+   *   slots that were added to the original grouping slots in phase 1;
+   *   count is mapped to count(*) and sum is mapped to sum
+   * - other aggregate exprs: same as the non-DISTINCT merge case
+   *   (count is mapped to sum, everything else stays the same)
+   *
+   * This call also creates the tuple descriptor and smap for the returned AggregateInfo.
+   */
+  private void createSecondPhaseAggInfo(
+      ArrayList<Expr> origGroupingExprs,
+      ArrayList<FunctionCallExpr> distinctAggExprs, Analyzer analyzer)
+      throws AnalysisException {
+    Preconditions.checkState(secondPhaseDistinctAggInfo_ == null);
+    Preconditions.checkState(!distinctAggExprs.isEmpty());
+    // The output of the 1st phase agg is the 1st phase intermediate.
+    TupleDescriptor inputDesc = intermediateTupleDesc_;
+
+    // construct agg exprs for original DISTINCT aggregate functions
+    // (these aren't part of aggExprs_)
+    ArrayList<FunctionCallExpr> secondPhaseAggExprs = Lists.newArrayList();
+    for (FunctionCallExpr inputExpr: distinctAggExprs) {
+      Preconditions.checkState(inputExpr.isAggregateFunction());
+      FunctionCallExpr aggExpr = null;
+      if (inputExpr.getFnName().getFunction().equals("count")) {
+        // COUNT(DISTINCT ...) ->
+        // COUNT(IF(IsNull(<agg slot 1>), NULL, IF(IsNull(<agg slot 2>), NULL, ...)))
+        // We need the nested IF to make sure that we do not count
+        // column-value combinations if any of the distinct columns are NULL.
+        // This behavior is consistent with MySQL.
+        Expr ifExpr = createCountDistinctAggExprParam(origGroupingExprs.size(),
+            origGroupingExprs.size() + inputExpr.getChildren().size() - 1,
+            inputDesc.getSlots());
+        Preconditions.checkNotNull(ifExpr);
+        ifExpr.analyzeNoThrow(analyzer);
+        aggExpr = new FunctionCallExpr("count", Lists.newArrayList(ifExpr));
+      } else if (inputExpr.getFnName().getFunction().equals("group_concat")) {
+        // Syntax: GROUP_CONCAT([DISTINCT] expression [, separator])
+        ArrayList<Expr> exprList = Lists.newArrayList();
+        // Add "expression" parameter. Need to get it from the inputDesc's slots so the
+        // tuple reference is correct.
+        exprList.add(new SlotRef(inputDesc.getSlots().get(origGroupingExprs.size())));
+        // Check if user provided a custom separator
+        if (inputExpr.getChildren().size() == 2) exprList.add(inputExpr.getChild(1));
+        aggExpr = new FunctionCallExpr(inputExpr.getFnName(), exprList);
+      } else {
+        // SUM(DISTINCT <expr>) -> SUM(<last grouping slot>);
+        // (MIN(DISTINCT ...) and MAX(DISTINCT ...) have their DISTINCT turned
+        // off during analysis, and AVG() is changed to SUM()/COUNT())
+        Expr aggExprParam =
+            new SlotRef(inputDesc.getSlots().get(origGroupingExprs.size()));
+        aggExpr = new FunctionCallExpr(inputExpr.getFnName(),
+            Lists.newArrayList(aggExprParam));
+      }
+      secondPhaseAggExprs.add(aggExpr);
+    }
+
+    // map all the remaining agg fns
+    for (int i = 0; i < aggregateExprs_.size(); ++i) {
+      FunctionCallExpr inputExpr = aggregateExprs_.get(i);
+      Preconditions.checkState(inputExpr.isAggregateFunction());
+      // we're aggregating an intermediate slot of the 1st agg phase
+      Expr aggExprParam =
+          new SlotRef(inputDesc.getSlots().get(i + getGroupingExprs().size()));
+      FunctionCallExpr aggExpr = FunctionCallExpr.createMergeAggCall(
+          inputExpr, Lists.newArrayList(aggExprParam));
+      secondPhaseAggExprs.add(aggExpr);
+    }
+    Preconditions.checkState(
+        secondPhaseAggExprs.size() == aggregateExprs_.size() + distinctAggExprs.size());
+
+    for (FunctionCallExpr aggExpr: secondPhaseAggExprs) {
+      aggExpr.analyzeNoThrow(analyzer);
+      Preconditions.checkState(aggExpr.isAggregateFunction());
+    }
+
+    ArrayList<Expr> substGroupingExprs =
+        Expr.substituteList(origGroupingExprs, intermediateTupleSmap_, analyzer, false);
+    secondPhaseDistinctAggInfo_ =
+        new AggregateInfo(substGroupingExprs, secondPhaseAggExprs, AggPhase.SECOND);
+    secondPhaseDistinctAggInfo_.createTupleDescs(analyzer);
+    secondPhaseDistinctAggInfo_.createSecondPhaseAggSMap(this, distinctAggExprs);
+    secondPhaseDistinctAggInfo_.createMergeAggInfo(analyzer);
+  }
+
+  /**
+   * Create smap to map original grouping and aggregate exprs onto output
+   * of secondPhaseDistinctAggInfo.
+   */
+  private void createSecondPhaseAggSMap(
+      AggregateInfo inputAggInfo, ArrayList<FunctionCallExpr> distinctAggExprs) {
+    outputTupleSmap_.clear();
+    int slotIdx = 0;
+    ArrayList<SlotDescriptor> slotDescs = outputTupleDesc_.getSlots();
+
+    int numDistinctParams = distinctAggExprs.get(0).getChildren().size();
+    // If we are counting distinct params of group_concat, we cannot include the custom
+    // separator since it is not a distinct param.
+    if (distinctAggExprs.get(0).getFnName().getFunction().equalsIgnoreCase(
+        "group_concat")
+        && numDistinctParams == 2) {
+      --numDistinctParams;
+    }
+    int numOrigGroupingExprs =
+        inputAggInfo.getGroupingExprs().size() - numDistinctParams;
+    Preconditions.checkState(slotDescs.size() ==
+        numOrigGroupingExprs + distinctAggExprs.size() +
+        inputAggInfo.getAggregateExprs().size());
+
+    // original grouping exprs -> first m slots
+    for (int i = 0; i < numOrigGroupingExprs; ++i, ++slotIdx) {
+      Expr groupingExpr = inputAggInfo.getGroupingExprs().get(i);
+      outputTupleSmap_.put(
+          groupingExpr.clone(), new SlotRef(slotDescs.get(slotIdx)));
+    }
+
+    // distinct agg exprs -> next n slots
+    for (int i = 0; i < distinctAggExprs.size(); ++i, ++slotIdx) {
+      Expr aggExpr = distinctAggExprs.get(i);
+      outputTupleSmap_.put(
+          aggExpr.clone(), (new SlotRef(slotDescs.get(slotIdx))));
+    }
+
+    // remaining agg exprs -> remaining slots
+    for (int i = 0; i < inputAggInfo.getAggregateExprs().size(); ++i, ++slotIdx) {
+      Expr aggExpr = inputAggInfo.getAggregateExprs().get(i);
+      outputTupleSmap_.put(aggExpr.clone(), new SlotRef(slotDescs.get(slotIdx)));
+    }
+  }
+
+  /**
+   * Populates the output and intermediate smaps based on the output and intermediate
+   * tuples that are assumed to be set. If an intermediate tuple is required, also
+   * populates the output-to-intermediate smap and registers auxiliary equivalence
+   * predicates between the grouping slots of the two tuples.
+   */
+  public void createSmaps(Analyzer analyzer) {
+    Preconditions.checkNotNull(outputTupleDesc_);
+    Preconditions.checkNotNull(intermediateTupleDesc_);
+
+    List<Expr> exprs = Lists.newArrayListWithCapacity(
+        groupingExprs_.size() + aggregateExprs_.size());
+    exprs.addAll(groupingExprs_);
+    exprs.addAll(aggregateExprs_);
+    for (int i = 0; i < exprs.size(); ++i) {
+      outputTupleSmap_.put(exprs.get(i).clone(),
+          new SlotRef(outputTupleDesc_.getSlots().get(i)));
+      if (!requiresIntermediateTuple()) continue;
+      intermediateTupleSmap_.put(exprs.get(i).clone(),
+          new SlotRef(intermediateTupleDesc_.getSlots().get(i)));
+      outputToIntermediateTupleSmap_.put(
+          new SlotRef(outputTupleDesc_.getSlots().get(i)),
+          new SlotRef(intermediateTupleDesc_.getSlots().get(i)));
+      if (i < groupingExprs_.size()) {
+        analyzer.createAuxEquivPredicate(
+            new SlotRef(outputTupleDesc_.getSlots().get(i)),
+            new SlotRef(intermediateTupleDesc_.getSlots().get(i)));
+      }
+    }
+    if (!requiresIntermediateTuple()) intermediateTupleSmap_ = outputTupleSmap_;
+
+    LOG.trace("output smap=" + outputTupleSmap_.debugString());
+    LOG.trace("intermediate smap=" + intermediateTupleSmap_.debugString());
+  }
+
+  /**
+   * Mark slots required for this aggregation as materialized:
+   * - all grouping output slots as well as grouping exprs
+   * - for non-distinct aggregation: the aggregate exprs of materialized aggregate slots;
+   *   this assumes that the output slots corresponding to aggregate exprs have already
+   *   been marked by the consumer of this select block
+   * - for distinct aggregation, we mark all aggregate output slots in order to keep
+   *   things simple
+   * Also computes materializedAggregateExprs.
+   * This call must be idempotent because it may be called more than once for UNION stmts.
+   */
+  @Override
+  public void materializeRequiredSlots(Analyzer analyzer, ExprSubstitutionMap smap) {
+    for (int i = 0; i < groupingExprs_.size(); ++i) {
+      outputTupleDesc_.getSlots().get(i).setIsMaterialized(true);
+      intermediateTupleDesc_.getSlots().get(i).setIsMaterialized(true);
+    }
+
+    // collect input exprs: grouping exprs plus aggregate exprs that need to be
+    // materialized
+    materializedSlots_.clear();
+    List<Expr> exprs = Lists.newArrayList();
+    exprs.addAll(groupingExprs_);
+    for (int i = 0; i < aggregateExprs_.size(); ++i) {
+      SlotDescriptor slotDesc =
+          outputTupleDesc_.getSlots().get(groupingExprs_.size() + i);
+      SlotDescriptor intermediateSlotDesc =
+          intermediateTupleDesc_.getSlots().get(groupingExprs_.size() + i);
+      if (isDistinctAgg()) {
+        slotDesc.setIsMaterialized(true);
+        intermediateSlotDesc.setIsMaterialized(true);
+      }
+      if (!slotDesc.isMaterialized()) continue;
+      intermediateSlotDesc.setIsMaterialized(true);
+      exprs.add(aggregateExprs_.get(i));
+      materializedSlots_.add(i);
+    }
+    List<Expr> resolvedExprs = Expr.substituteList(exprs, smap, analyzer, false);
+    analyzer.materializeSlots(resolvedExprs);
+
+    if (isDistinctAgg()) {
+      secondPhaseDistinctAggInfo_.materializeRequiredSlots(analyzer, null);
+    }
+  }
+
+  /**
+   * Checks if all materialized aggregate expressions have distinct semantics.
+   * It returns true if either of the following is true:
+   * (1) all materialized aggregate expressions have distinct semantics
+   *     (e.g. MIN, MAX, NDV). In other words, this optimization will work
+   *     for COUNT(DISTINCT c) but not COUNT(c).
+   * (2) there are no aggregate expressions but only grouping expressions.
+   */
+  public boolean hasAllDistinctAgg() {
+    if (hasAggregateExprs()) {
+      for (FunctionCallExpr aggExpr : getMaterializedAggregateExprs()) {
+        if (!aggExpr.isDistinct() && !aggExpr.ignoresDistinct()) return false;
+      }
+    } else {
+      Preconditions.checkState(!groupingExprs_.isEmpty());
+    }
+    return true;
+  }
+
+  /**
+   * Validates the internal state of this agg info: Checks that the number of
+   * materialized slots of the output tuple corresponds to the number of materialized
+   * aggregate functions plus the number of grouping exprs. Also checks that the return
+   * types of the aggregate and grouping exprs correspond to the slots in the output
+   * tuple.
+   */
+  public void checkConsistency() {
+    ArrayList<SlotDescriptor> slots = outputTupleDesc_.getSlots();
+
+    // Check materialized slots.
+    int numMaterializedSlots = 0;
+    for (SlotDescriptor slotDesc: slots) {
+      if (slotDesc.isMaterialized()) ++numMaterializedSlots;
+    }
+    Preconditions.checkState(numMaterializedSlots ==
+        materializedSlots_.size() + groupingExprs_.size());
+
+    // Check that grouping expr return types match the slot descriptors.
+    int slotIdx = 0;
+    for (int i = 0; i < groupingExprs_.size(); ++i) {
+      Expr groupingExpr = groupingExprs_.get(i);
+      Type slotType = slots.get(slotIdx).getType();
+      Preconditions.checkState(groupingExpr.getType().equals(slotType),
+          String.format("Grouping expr %s returns type %s but its output tuple " +
+              "slot has type %s", groupingExpr.toSql(),
+              groupingExpr.getType().toString(), slotType.toString()));
+      ++slotIdx;
+    }
+    // Check that aggregate expr return types match the slot descriptors.
+    for (int i = 0; i < aggregateExprs_.size(); ++i) {
+      Expr aggExpr = aggregateExprs_.get(i);
+      Type slotType = slots.get(slotIdx).getType();
+      Preconditions.checkState(aggExpr.getType().equals(slotType),
+          String.format("Agg expr %s returns type %s but its output tuple " +
+              "slot has type %s", aggExpr.toSql(), aggExpr.getType().toString(),
+              slotType.toString()));
+      ++slotIdx;
+    }
+  }
+
+  /**
+   * Returns DataPartition derived from grouping exprs.
+   * Returns unpartitioned spec if no grouping.
+   * TODO: this won't work when we start supporting range partitions,
+   * because we could derive both hash and order-based partitions
+   */
+  public DataPartition getPartition() {
+    if (groupingExprs_.isEmpty()) {
+      return DataPartition.UNPARTITIONED;
+    } else {
+      return DataPartition.hashPartitioned(groupingExprs_);
+    }
+  }
+
+  @Override
+  public String debugString() {
+    StringBuilder out = new StringBuilder(super.debugString());
+    out.append(Objects.toStringHelper(this)
+        .add("phase", aggPhase_)
+        .add("intermediate_smap", intermediateTupleSmap_.debugString())
+        .add("output_smap", outputTupleSmap_.debugString())
+        .toString());
+    if (mergeAggInfo_ != this && mergeAggInfo_ != null) {
+      out.append("\nmergeAggInfo:\n" + mergeAggInfo_.debugString());
+    }
+    if (secondPhaseDistinctAggInfo_ != null) {
+      out.append("\nsecondPhaseDistinctAggInfo:\n"
+          + secondPhaseDistinctAggInfo_.debugString());
+    }
+    return out.toString();
+  }
+
+  @Override
+  protected String tupleDebugName() { return "agg-tuple"; }
+
+  @Override
+  public AggregateInfo clone() { return new AggregateInfo(this); }
+}

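To make the nested-IF shape built by createCountDistinctAggExprParam() above
concrete, a standalone sketch (illustration only) that performs the same
right-to-left construction over plain strings instead of Exprs:

import java.util.Arrays;
import java.util.List;

public class CountDistinctParamExample {
  // Builds IF(IsNull(slot_i), NULL, ...) nesting over slots[firstIdx..lastIdx],
  // mirroring the loop in createCountDistinctAggExprParam().
  static String buildParam(List<String> slots, int firstIdx, int lastIdx) {
    if (firstIdx > lastIdx) return null;
    String elseExpr = slots.get(lastIdx);  // innermost arm: the last slot
    if (firstIdx == lastIdx) return elseExpr;
    for (int i = lastIdx - 1; i >= firstIdx; --i) {
      elseExpr = "IF(IsNull(" + slots.get(i) + "), NULL, " + elseExpr + ")";
    }
    return elseExpr;
  }

  public static void main(String[] args) {
    List<String> slots = Arrays.asList("slot1", "slot2", "slot3");
    // Prints: IF(IsNull(slot1), NULL, IF(IsNull(slot2), NULL, slot3))
    System.out.println(buildParam(slots, 0, 2));
  }
}
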
http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/org/apache/impala/analysis/AggregateInfoBase.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/AggregateInfoBase.java b/fe/src/main/java/org/apache/impala/analysis/AggregateInfoBase.java
new file mode 100644
index 0000000..f3ad3f8
--- /dev/null
+++ b/fe/src/main/java/org/apache/impala/analysis/AggregateInfoBase.java
@@ -0,0 +1,221 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package com.cloudera.impala.analysis;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.cloudera.impala.catalog.AggregateFunction;
+import com.cloudera.impala.catalog.ColumnStats;
+import com.cloudera.impala.catalog.Type;
+import com.google.common.base.Objects;
+import com.google.common.base.Preconditions;
+import com.google.common.collect.Lists;
+
+/**
+ * Base class for AggregateInfo and AnalyticInfo containing the intermediate and output
+ * tuple descriptors as well as their smaps for evaluating aggregate functions.
+ */
+public abstract class AggregateInfoBase {
+  private final static Logger LOG =
+      LoggerFactory.getLogger(AggregateInfoBase.class);
+
+  // For aggregations: All unique grouping expressions from a select block.
+  // For analytics: Empty.
+  protected ArrayList<Expr> groupingExprs_;
+
+  // For aggregations: All unique aggregate expressions from a select block.
+  // For analytics: The results of AnalyticExpr.getFnCall() for the unique
+  // AnalyticExprs of a select block.
+  protected ArrayList<FunctionCallExpr> aggregateExprs_;
+
+  // The tuple into which the intermediate output of an aggregation is materialized.
+  // Contains groupingExprs.size() + aggregateExprs.size() slots. The first
+  // groupingExprs.size() slots contain the values of the grouping exprs, followed by
+  // slots into which the
+  // aggregateExprs' update()/merge() symbols materialize their output, i.e., slots
+  // of the aggregate functions' intermediate types.
+  // Identical to outputTupleDesc_ if no aggregateExpr has an output type that is
+  // different from its intermediate type.
+  protected TupleDescriptor intermediateTupleDesc_;
+
+  // The tuple into which the final output of the aggregation is materialized.
+  // Contains groupingExprs.size() + aggregateExprs.size() slots. The first
+  // groupingExprs.size() slots contain the values of the grouping exprs, followed by
+  // slots into which the aggregateExprs' finalize() symbols write their results, i.e.,
+  // slots of the aggregate
+  // functions' output types.
+  protected TupleDescriptor outputTupleDesc_;
+
+  // For aggregations: indices into the aggregate exprs that need to be materialized.
+  // For analytics: indices into the analytic exprs and their corresponding aggregate
+  // exprs that need to be materialized.
+  // Populated in materializeRequiredSlots() which must be implemented by subclasses.
+  protected ArrayList<Integer> materializedSlots_ = Lists.newArrayList();
+
+  protected AggregateInfoBase(ArrayList<Expr> groupingExprs,
+      ArrayList<FunctionCallExpr> aggExprs)  {
+    Preconditions.checkState(groupingExprs != null || aggExprs != null);
+    groupingExprs_ =
+        groupingExprs != null ? Expr.cloneList(groupingExprs) : new ArrayList<Expr>();
+    Preconditions.checkState(aggExprs != null || !(this instanceof AnalyticInfo));
+    aggregateExprs_ =
+        aggExprs != null ? Expr.cloneList(aggExprs) : new ArrayList<FunctionCallExpr>();
+  }
+
+  /**
+   * C'tor for cloning.
+   */
+  protected AggregateInfoBase(AggregateInfoBase other) {
+    groupingExprs_ =
+        (other.groupingExprs_ != null) ? Expr.cloneList(other.groupingExprs_) : null;
+    aggregateExprs_ =
+        (other.aggregateExprs_ != null) ? Expr.cloneList(other.aggregateExprs_) : null;
+    intermediateTupleDesc_ = other.intermediateTupleDesc_;
+    outputTupleDesc_ = other.outputTupleDesc_;
+    materializedSlots_ = Lists.newArrayList(other.materializedSlots_);
+  }
+
+  /**
+   * Creates the intermediate and output tuple descriptors. If no agg expr has an
+   * intermediate type different from its output type, then only the output tuple
+   * descriptor is created and the intermediate tuple is set to the output tuple.
+   */
+  protected void createTupleDescs(Analyzer analyzer) {
+    // Create the intermediate tuple desc first, so that the tuple ids are increasing
+    // from bottom to top in the plan tree.
+    intermediateTupleDesc_ = createTupleDesc(analyzer, false);
+    if (requiresIntermediateTuple(aggregateExprs_)) {
+      outputTupleDesc_ = createTupleDesc(analyzer, true);
+    } else {
+      outputTupleDesc_ = intermediateTupleDesc_;
+    }
+  }
+
+  /**
+   * Returns a tuple descriptor for the aggregation/analytic's intermediate or final
+   * result, depending on whether isOutputTuple is true or false.
+   * Also updates the appropriate substitution map, and creates and registers auxiliary
+   * equality predicates between the grouping slots and the grouping exprs.
+   */
+  private TupleDescriptor createTupleDesc(Analyzer analyzer, boolean isOutputTuple) {
+    TupleDescriptor result =
+        analyzer.getDescTbl().createTupleDescriptor(
+          tupleDebugName() + (isOutputTuple ? "-out" : "-intermed"));
+    List<Expr> exprs = Lists.newArrayListWithCapacity(
+        groupingExprs_.size() + aggregateExprs_.size());
+    exprs.addAll(groupingExprs_);
+    exprs.addAll(aggregateExprs_);
+
+    int aggregateExprStartIndex = groupingExprs_.size();
+    for (int i = 0; i < exprs.size(); ++i) {
+      Expr expr = exprs.get(i);
+      SlotDescriptor slotDesc = analyzer.addSlotDescriptor(result);
+      slotDesc.initFromExpr(expr);
+      if (i < aggregateExprStartIndex) {
+        // register equivalence between grouping slot and grouping expr;
+        // do this only when the grouping expr isn't a constant, otherwise
+        // it'll simply show up as a gratuitous HAVING predicate
+        // (which would actually be incorrect if the constant happens to be NULL)
+        if (!expr.isConstant()) {
+          analyzer.createAuxEquivPredicate(new SlotRef(slotDesc), expr.clone());
+        }
+      } else {
+        Preconditions.checkArgument(expr instanceof FunctionCallExpr);
+        FunctionCallExpr aggExpr = (FunctionCallExpr)expr;
+        if (aggExpr.isMergeAggFn()) {
+          slotDesc.setLabel(aggExpr.getChild(0).toSql());
+          slotDesc.setSourceExpr(aggExpr.getChild(0));
+        } else {
+          slotDesc.setLabel(aggExpr.toSql());
+          slotDesc.setSourceExpr(aggExpr);
+        }
+
+        // count(*) is non-nullable.
+        if (aggExpr.getFnName().getFunction().equals("count")) {
+          // TODO: Consider making nullability a property of types or of builtin agg fns.
+          // row_number, rank, and dense_rank are non-nullable as well.
+          slotDesc.setIsNullable(false);
+        }
+        if (!isOutputTuple) {
+          Type intermediateType = ((AggregateFunction)aggExpr.fn_).getIntermediateType();
+          if (intermediateType != null) {
+            // Use the output type as intermediate if the function has a wildcard decimal.
+            if (!intermediateType.isWildcardDecimal()) {
+              slotDesc.setType(intermediateType);
+            } else {
+              Preconditions.checkState(expr.getType().isDecimal());
+            }
+          }
+        }
+      }
+    }
+    String prefix = (isOutputTuple ? "result" : "intermediate");
+    LOG.trace(prefix + " tuple=" + result.debugString());
+    return result;
+  }
+
+  /**
+   * Marks the slots required for evaluating an Analytic/AggregateInfo by
+   * resolving the materialized aggregate/analytic exprs against smap,
+   * and then marking their slots.
+   */
+  public abstract void materializeRequiredSlots(Analyzer analyzer,
+      ExprSubstitutionMap smap);
+
+  public ArrayList<Expr> getGroupingExprs() { return groupingExprs_; }
+  public ArrayList<FunctionCallExpr> getAggregateExprs() { return aggregateExprs_; }
+  public TupleDescriptor getOutputTupleDesc() { return outputTupleDesc_; }
+  public TupleDescriptor getIntermediateTupleDesc() { return intermediateTupleDesc_; }
+  public TupleId getIntermediateTupleId() { return intermediateTupleDesc_.getId(); }
+  public TupleId getOutputTupleId() { return outputTupleDesc_.getId(); }
+  public boolean requiresIntermediateTuple() {
+    Preconditions.checkNotNull(intermediateTupleDesc_);
+    Preconditions.checkNotNull(outputTupleDesc_);
+    return intermediateTupleDesc_ != outputTupleDesc_;
+  }
+
+  /**
+   * Returns true if evaluating the given aggregate exprs requires an intermediate tuple,
+   * i.e., whether one of the aggregate functions has an intermediate type different from
+   * its output type.
+   */
+  public static <T extends Expr> boolean requiresIntermediateTuple(List<T> aggExprs) {
+    for (Expr aggExpr: aggExprs) {
+      Type intermediateType = ((AggregateFunction) aggExpr.fn_).getIntermediateType();
+      if (intermediateType != null) return true;
+    }
+    return false;
+  }
+
+  public String debugString() {
+    StringBuilder out = new StringBuilder();
+    out.append(Objects.toStringHelper(this)
+        .add("grouping_exprs", Expr.debugString(groupingExprs_))
+        .add("aggregate_exprs", Expr.debugString(aggregateExprs_))
+        .add("intermediate_tuple", (intermediateTupleDesc_ == null)
+            ? "null" : intermediateTupleDesc_.debugString())
+        .add("output_tuple", (outputTupleDesc_ == null)
+            ? "null" : outputTupleDesc_.debugString())
+        .toString());
+    return out.toString();
+  }
+
+  protected abstract String tupleDebugName();
+}

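A small sketch of the decision requiresIntermediateTuple() encodes
(illustration only; FnShape is a hypothetical stand-in for AggregateFunction):
an intermediate tuple is needed exactly when some aggregate function declares
an intermediate type different from its output type.

import java.util.Arrays;
import java.util.List;

public class IntermediateTupleExample {
  // Hypothetical stand-in for AggregateFunction: intermediateType is null
  // when it is identical to the output type.
  static class FnShape {
    final String name;
    final String intermediateType;
    FnShape(String name, String intermediateType) {
      this.name = name;
      this.intermediateType = intermediateType;
    }
  }

  static boolean requiresIntermediateTuple(List<FnShape> fns) {
    for (FnShape fn : fns) {
      if (fn.intermediateType != null) return true;
    }
    return false;
  }

  public static void main(String[] args) {
    // count finalizes straight from its output slot; the second function
    // (hypothetical) carries a wider intermediate buffer, forcing a separate
    // intermediate tuple.
    List<FnShape> fns = Arrays.asList(
        new FnShape("count", null),
        new FnShape("avg_sample", "CHAR(16)"));
    System.out.println(requiresIntermediateTuple(fns));  // true
  }
}
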
http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/org/apache/impala/analysis/AlterTableAddPartitionStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/AlterTableAddPartitionStmt.java b/fe/src/main/java/org/apache/impala/analysis/AlterTableAddPartitionStmt.java
new file mode 100644
index 0000000..a5cb2ca
--- /dev/null
+++ b/fe/src/main/java/org/apache/impala/analysis/AlterTableAddPartitionStmt.java
@@ -0,0 +1,117 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package com.cloudera.impala.analysis;
+
+import com.cloudera.impala.authorization.Privilege;
+import com.cloudera.impala.catalog.HdfsTable;
+import com.cloudera.impala.catalog.Table;
+import com.cloudera.impala.common.AnalysisException;
+import com.cloudera.impala.common.FileSystemUtil;
+import com.cloudera.impala.thrift.TAlterTableAddPartitionParams;
+import com.cloudera.impala.thrift.TAlterTableParams;
+import com.cloudera.impala.thrift.TAlterTableType;
+import com.google.common.base.Preconditions;
+import org.apache.hadoop.fs.permission.FsAction;
+
+/**
+ * Represents an ALTER TABLE ADD PARTITION statement.
+ */
+public class AlterTableAddPartitionStmt extends AlterTableStmt {
+  private final HdfsUri location_;
+  private final boolean ifNotExists_;
+  private final PartitionSpec partitionSpec_;
+  private final HdfsCachingOp cacheOp_;
+
+  public AlterTableAddPartitionStmt(TableName tableName,
+      PartitionSpec partitionSpec, HdfsUri location, boolean ifNotExists,
+      HdfsCachingOp cacheOp) {
+    super(tableName);
+    Preconditions.checkState(partitionSpec != null);
+    location_ = location;
+    ifNotExists_ = ifNotExists;
+    partitionSpec_ = partitionSpec;
+    partitionSpec_.setTableName(tableName);
+    cacheOp_ = cacheOp;
+  }
+
+  public boolean getIfNotExists() { return ifNotExists_; }
+  public HdfsUri getLocation() { return location_; }
+
+  @Override
+  public String toSql() {
+    StringBuilder sb = new StringBuilder("ALTER TABLE " + getTbl());
+    sb.append(" ADD ");
+    if (ifNotExists_) {
+      sb.append("IF NOT EXISTS ");
+    }
+    sb.append(" " + partitionSpec_.toSql());
+    if (location_ != null) {
+      sb.append(String.format(" LOCATION '%s'", location_));
+    }
+    if (cacheOp_ != null) sb.append(cacheOp_.toSql());
+    return sb.toString();
+  }
+
+  @Override
+  public TAlterTableParams toThrift() {
+    TAlterTableParams params = super.toThrift();
+    params.setAlter_type(TAlterTableType.ADD_PARTITION);
+    TAlterTableAddPartitionParams addPartParams = new TAlterTableAddPartitionParams();
+    addPartParams.setPartition_spec(partitionSpec_.toThrift());
+    addPartParams.setLocation(location_ == null ? null : location_.toString());
+    addPartParams.setIf_not_exists(ifNotExists_);
+    if (cacheOp_ != null) addPartParams.setCache_op(cacheOp_.toThrift());
+    params.setAdd_partition_params(addPartParams);
+    return params;
+  }
+
+  @Override
+  public void analyze(Analyzer analyzer) throws AnalysisException {
+    super.analyze(analyzer);
+    if (!ifNotExists_) partitionSpec_.setPartitionShouldNotExist();
+    partitionSpec_.setPrivilegeRequirement(Privilege.ALTER);
+    partitionSpec_.analyze(analyzer);
+
+    if (location_ != null) {
+      location_.analyze(analyzer, Privilege.ALL, FsAction.READ_WRITE);
+    }
+
+    boolean shouldCache = false;
+    Table table = getTargetTable();
+    if (cacheOp_ != null) {
+      cacheOp_.analyze(analyzer);
+      shouldCache = cacheOp_.shouldCache();
+    } else if (table instanceof HdfsTable) {
+      shouldCache = ((HdfsTable)table).isMarkedCached();
+    }
+    if (shouldCache) {
+      if (!(table instanceof HdfsTable)) {
+        throw new AnalysisException("Caching must target a HDFS table: " +
+            table.getFullName());
+      }
+      HdfsTable hdfsTable = (HdfsTable)table;
+      if ((location_ != null && !FileSystemUtil.isPathCacheable(location_.getPath())) ||
+          (location_ == null && !hdfsTable.isLocationCacheable())) {
+        throw new AnalysisException(String.format("Location '%s' cannot be cached. " +
+            "Please retry without caching: ALTER TABLE %s ADD PARTITION ... UNCACHED",
+            (location_ != null) ? location_.toString() : hdfsTable.getLocation(),
+            table.getFullName()));
+      }
+    }
+  }
+}

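For illustration (not part of this commit), a sketch that mirrors the string
assembly in toSql() above, with plain strings standing in for PartitionSpec
and HdfsUri:

public class AddPartitionSqlExample {
  // Hypothetical mirror of AlterTableAddPartitionStmt.toSql() over strings.
  static String renderAddPartition(String tbl, String partitionSpec,
      String location, boolean ifNotExists) {
    StringBuilder sb = new StringBuilder("ALTER TABLE " + tbl);
    sb.append(" ADD ");
    if (ifNotExists) sb.append("IF NOT EXISTS ");
    sb.append(partitionSpec);
    if (location != null) sb.append(String.format(" LOCATION '%s'", location));
    return sb.toString();
  }

  public static void main(String[] args) {
    // Prints: ALTER TABLE logs ADD IF NOT EXISTS PARTITION (year=2016, month=9)
    // LOCATION '/data/logs/2016/09'   (as a single line)
    System.out.println(renderAddPartition("logs",
        "PARTITION (year=2016, month=9)", "/data/logs/2016/09", true));
  }
}
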
http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/org/apache/impala/analysis/AlterTableAddReplaceColsStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/AlterTableAddReplaceColsStmt.java b/fe/src/main/java/org/apache/impala/analysis/AlterTableAddReplaceColsStmt.java
new file mode 100644
index 0000000..aaa223a
--- /dev/null
+++ b/fe/src/main/java/org/apache/impala/analysis/AlterTableAddReplaceColsStmt.java
@@ -0,0 +1,108 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package com.cloudera.impala.analysis;
+
+import java.util.List;
+import java.util.Set;
+
+import org.apache.hadoop.hive.metastore.api.FieldSchema;
+
+import com.cloudera.impala.catalog.Column;
+import com.cloudera.impala.catalog.HBaseTable;
+import com.cloudera.impala.catalog.Table;
+import com.cloudera.impala.common.AnalysisException;
+import com.cloudera.impala.thrift.TAlterTableAddReplaceColsParams;
+import com.cloudera.impala.thrift.TAlterTableParams;
+import com.cloudera.impala.thrift.TAlterTableType;
+import com.google.common.base.Preconditions;
+import com.google.common.collect.Lists;
+import com.google.common.collect.Sets;
+
+/**
+ * Represents an ALTER TABLE ADD|REPLACE COLUMNS (colDef1, colDef2, ...) statement.
+ */
+public class AlterTableAddReplaceColsStmt extends AlterTableStmt {
+  private final List<ColumnDef> columnDefs_;
+  private final boolean replaceExistingCols_;
+
+  public AlterTableAddReplaceColsStmt(TableName tableName, List<ColumnDef> columnDefs,
+      boolean replaceExistingCols) {
+    super(tableName);
+    Preconditions.checkState(columnDefs != null && columnDefs.size() > 0);
+    columnDefs_ = Lists.newArrayList(columnDefs);
+    replaceExistingCols_ = replaceExistingCols;
+  }
+
+  public List<ColumnDef> getColumnDescs() { return columnDefs_; }
+
+  // Replace columns instead of appending new columns.
+  public boolean getReplaceExistingCols() {
+    return replaceExistingCols_;
+  }
+
+  @Override
+  public TAlterTableParams toThrift() {
+    TAlterTableParams params = super.toThrift();
+    params.setAlter_type(TAlterTableType.ADD_REPLACE_COLUMNS);
+    TAlterTableAddReplaceColsParams colParams = new TAlterTableAddReplaceColsParams();
+    for (ColumnDef col: getColumnDescs()) {
+      colParams.addToColumns(col.toThrift());
+    }
+    colParams.setReplace_existing_cols(replaceExistingCols_);
+    params.setAdd_replace_cols_params(colParams);
+    return params;
+  }
+
+  @Override
+  public void analyze(Analyzer analyzer) throws AnalysisException {
+    super.analyze(analyzer);
+    Table t = getTargetTable();
+    // TODO: Support column-level DDL on HBase tables. Requires updating the column
+    // mappings along with the table columns.
+    if (t instanceof HBaseTable) {
+      throw new AnalysisException("ALTER TABLE ADD|REPLACE COLUMNS not currently " +
+          "supported on HBase tables.");
+    }
+
+    // Build a set of the partition keys for the table.
+    Set<String> existingPartitionKeys = Sets.newHashSet();
+    for (FieldSchema fs: t.getMetaStoreTable().getPartitionKeys()) {
+      existingPartitionKeys.add(fs.getName().toLowerCase());
+    }
+
+    // Make sure the new columns don't already exist in the table, that the names
+    // are all valid and unique, and that none of the columns conflict with
+    // partition columns.
+    Set<String> colNames = Sets.newHashSet();
+    for (ColumnDef c: columnDefs_) {
+      c.analyze();
+      String colName = c.getColName().toLowerCase();
+      if (existingPartitionKeys.contains(colName)) {
+        throw new AnalysisException(
+            "Column name conflicts with existing partition column: " + colName);
+      }
+
+      Column col = t.getColumn(colName);
+      if (col != null && !replaceExistingCols_) {
+        throw new AnalysisException("Column already exists: " + colName);
+      } else if (!colNames.add(colName)) {
+        throw new AnalysisException("Duplicate column name: " + colName);
+      }
+    }
+  }
+}

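The analyze() loop above applies three checks to every new column, in order: conflict with a partition key, conflict with an existing column (waived when REPLACE COLUMNS is used), and duplication within the statement itself. A self-contained sketch of that ordering, with plain sets standing in for catalog metadata (validate and its parameters are hypothetical names):

    import java.util.Arrays;
    import java.util.HashSet;
    import java.util.List;
    import java.util.Set;

    // Sketch only: reproduces the validation order in
    // AlterTableAddReplaceColsStmt.analyze() with plain sets in place
    // of Impala's catalog metadata.
    public class AddColsValidationSketch {
      static void validate(List<String> newCols, Set<String> partitionKeys,
          Set<String> existingCols, boolean replace) {
        Set<String> seen = new HashSet<>();
        for (String col : newCols) {
          String name = col.toLowerCase();
          if (partitionKeys.contains(name)) {
            throw new IllegalArgumentException(
                "Column name conflicts with existing partition column: " + name);
          }
          if (existingCols.contains(name) && !replace) {
            throw new IllegalArgumentException("Column already exists: " + name);
          }
          if (!seen.add(name)) {
            throw new IllegalArgumentException("Duplicate column name: " + name);
          }
        }
      }

      public static void main(String[] args) {
        Set<String> partKeys = new HashSet<>(Arrays.asList("year"));
        Set<String> existing = new HashSet<>(Arrays.asList("id", "name"));
        // OK for ADD COLUMNS: new, unique, no partition conflict.
        validate(Arrays.asList("city", "zip"), partKeys, existing, false);
        // Would throw for ADD COLUMNS, but passes for REPLACE COLUMNS.
        validate(Arrays.asList("id"), partKeys, existing, true);
        System.out.println("validation passed");
      }
    }
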
http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/org/apache/impala/analysis/AlterTableChangeColStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/AlterTableChangeColStmt.java b/fe/src/main/java/org/apache/impala/analysis/AlterTableChangeColStmt.java
new file mode 100644
index 0000000..c733ca0
--- /dev/null
+++ b/fe/src/main/java/org/apache/impala/analysis/AlterTableChangeColStmt.java
@@ -0,0 +1,101 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package com.cloudera.impala.analysis;
+
+import org.apache.hadoop.hive.metastore.api.FieldSchema;
+
+import com.cloudera.impala.catalog.HBaseTable;
+import com.cloudera.impala.catalog.Table;
+import com.cloudera.impala.common.AnalysisException;
+import com.cloudera.impala.thrift.TAlterTableChangeColParams;
+import com.cloudera.impala.thrift.TAlterTableParams;
+import com.cloudera.impala.thrift.TAlterTableType;
+import com.google.common.base.Preconditions;
+
+/**
+ * Represents an ALTER TABLE CHANGE COLUMN colName newColDef statement.
+ * Note: It would be fairly simple to reuse this class to support ALTER TABLE MODIFY
+ * newColDef statements in the future by making colName optional.
+ */
+public class AlterTableChangeColStmt extends AlterTableStmt {
+  private final String colName_;
+  private final ColumnDef newColDef_;
+
+  public AlterTableChangeColStmt(TableName tableName, String colName,
+      ColumnDef newColDef) {
+    super(tableName);
+    Preconditions.checkNotNull(newColDef);
+    Preconditions.checkState(colName != null && !colName.isEmpty());
+    colName_ = colName;
+    newColDef_ = newColDef;
+  }
+
+  public String getColName() { return colName_; }
+  public ColumnDef getNewColDef() { return newColDef_; }
+
+  @Override
+  public TAlterTableParams toThrift() {
+    TAlterTableParams params = super.toThrift();
+    params.setAlter_type(TAlterTableType.CHANGE_COLUMN);
+    TAlterTableChangeColParams colParams = new TAlterTableChangeColParams();
+    colParams.setCol_name(colName_);
+    colParams.setNew_col_def(newColDef_.toThrift());
+    params.setChange_col_params(colParams);
+    return params;
+  }
+
+  @Override
+  public void analyze(Analyzer analyzer) throws AnalysisException {
+    super.analyze(analyzer);
+    Table t = getTargetTable();
+    // TODO: Support column-level DDL on HBase tables. Requires updating the column
+    // mappings along with the table columns.
+    if (t instanceof HBaseTable) {
+      throw new AnalysisException("ALTER TABLE CHANGE COLUMN not currently supported " +
+          "on HBase tables.");
+    }
+    String tableName = getDb() + "." + getTbl();
+
+    // Verify there are no conflicts with partition columns.
+    for (FieldSchema fs: t.getMetaStoreTable().getPartitionKeys()) {
+      if (fs.getName().equalsIgnoreCase(colName_)) {
+        throw new AnalysisException("Cannot modify partition column: " + colName_);
+      }
+      if (fs.getName().equalsIgnoreCase(newColDef_.getColName())) {
+        throw new AnalysisException(
+            "Column name conflicts with existing partition column: " +
+            newColDef_.getColName());
+      }
+    }
+
+    // Verify the column being modified exists in the table
+    if (t.getColumn(colName_) == null) {
+      throw new AnalysisException(String.format(
+          "Column '%s' does not exist in table: %s", colName_, tableName));
+    }
+
+    // Check that the new column def's name is valid.
+    newColDef_.analyze();
+    // Verify that if the column name is being changed, the new name doesn't conflict
+    // with an existing column.
+    if (!colName_.equalsIgnoreCase(newColDef_.getColName()) &&
+        t.getColumn(newColDef_.getColName()) != null) {
+      throw new AnalysisException("Column already exists: " + newColDef_.getColName());
+    }
+  }
+}

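One subtlety in the final check above: a name collision is only possible when the column is actually being renamed; a CHANGE COLUMN that keeps the same name (case-insensitively) and alters only the type or comment always passes. A small sketch of just that rule, under hypothetical names:

    import java.util.Arrays;
    import java.util.HashSet;
    import java.util.Set;

    // Sketch only: the rename-conflict rule from
    // AlterTableChangeColStmt.analyze(), with hypothetical names.
    public class ChangeColConflictSketch {
      static boolean renameConflicts(String oldName, String newName,
          Set<String> existingCols) {
        // Same name (ignoring case) means a type/comment change, not a
        // rename, so no conflict is possible.
        if (oldName.equalsIgnoreCase(newName)) return false;
        return existingCols.contains(newName.toLowerCase());
      }

      public static void main(String[] args) {
        Set<String> cols = new HashSet<>(Arrays.asList("id", "name"));
        System.out.println(renameConflicts("id", "ID", cols));   // false: same column
        System.out.println(renameConflicts("id", "name", cols)); // true: collision
        System.out.println(renameConflicts("id", "addr", cols)); // false: fresh name
      }
    }
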
http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/org/apache/impala/analysis/AlterTableDropColStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/AlterTableDropColStmt.java b/fe/src/main/java/org/apache/impala/analysis/AlterTableDropColStmt.java
new file mode 100644
index 0000000..d7f5ab5
--- /dev/null
+++ b/fe/src/main/java/org/apache/impala/analysis/AlterTableDropColStmt.java
@@ -0,0 +1,84 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package com.cloudera.impala.analysis;
+
+import org.apache.hadoop.hive.metastore.api.FieldSchema;
+
+import com.cloudera.impala.catalog.HBaseTable;
+import com.cloudera.impala.catalog.Table;
+import com.cloudera.impala.common.AnalysisException;
+import com.cloudera.impala.thrift.TAlterTableDropColParams;
+import com.cloudera.impala.thrift.TAlterTableParams;
+import com.cloudera.impala.thrift.TAlterTableType;
+import com.google.common.base.Preconditions;
+
+/**
+ * Represents an ALTER TABLE DROP COLUMN statement.
+ * Note: Hive does not support this syntax for dropping columns, but it is supported
+ * by MySQL.
+ */
+public class AlterTableDropColStmt extends AlterTableStmt {
+  private final String colName_;
+
+  public AlterTableDropColStmt(TableName tableName, String colName) {
+    super(tableName);
+    Preconditions.checkState(colName != null && !colName.isEmpty());
+    colName_ = colName;
+  }
+
+  public String getColName() { return colName_; }
+
+  @Override
+  public TAlterTableParams toThrift() {
+    TAlterTableParams params = super.toThrift();
+    params.setAlter_type(TAlterTableType.DROP_COLUMN);
+    TAlterTableDropColParams dropColParams = new TAlterTableDropColParams(colName_);
+    params.setDrop_col_params(dropColParams);
+    return params;
+  }
+
+  @Override
+  public void analyze(Analyzer analyzer) throws AnalysisException {
+    super.analyze(analyzer);
+    Table t = getTargetTable();
+    // TODO: Support column-level DDL on HBase tables. Requires updating the column
+    // mappings along with the table columns.
+    if (t instanceof HBaseTable) {
+      throw new AnalysisException("ALTER TABLE DROP COLUMN not currently supported " +
+          "on HBase tables.");
+    }
+    String tableName = getDb() + "." + getTbl();
+
+    for (FieldSchema fs: t.getMetaStoreTable().getPartitionKeys()) {
+      if (fs.getName().equalsIgnoreCase(colName_)) {
+        throw new AnalysisException("Cannot drop partition column: " + fs.getName());
+      }
+    }
+
+    if (t.getColumns().size() - t.getMetaStoreTable().getPartitionKeysSize() <= 1) {
+      throw new AnalysisException(String.format(
+          "Cannot drop column '%s' from %s. Tables must contain at least 1 column.",
+          colName_, tableName));
+    }
+
+    if (t.getColumn(colName_) == null) {
+      throw new AnalysisException(String.format(
+          "Column '%s' does not exist in table: %s", colName_, tableName));
+    }
+  }
+}

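The minimum-column check above relies on getColumns() counting partition columns as well, so the droppable (data) column count is totalCols - partitionKeys, and a drop is rejected when it would hit zero. A tiny sketch of that arithmetic (canDrop is a hypothetical name):

    // Sketch only: the minimum-column arithmetic in
    // AlterTableDropColStmt.analyze().
    public class DropColArithmeticSketch {
      static boolean canDrop(int totalCols, int partitionKeys) {
        // totalCols - partitionKeys is the number of data columns;
        // at least one must survive the drop.
        return totalCols - partitionKeys > 1;
      }

      public static void main(String[] args) {
        System.out.println(canDrop(4, 2)); // true: two data columns, one remains
        System.out.println(canDrop(3, 2)); // false: would drop the last data column
      }
    }
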
http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/org/apache/impala/analysis/AlterTableDropPartitionStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/AlterTableDropPartitionStmt.java b/fe/src/main/java/org/apache/impala/analysis/AlterTableDropPartitionStmt.java
new file mode 100644
index 0000000..f8bc09c
--- /dev/null
+++ b/fe/src/main/java/org/apache/impala/analysis/AlterTableDropPartitionStmt.java
@@ -0,0 +1,79 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package com.cloudera.impala.analysis;
+
+import com.cloudera.impala.authorization.Privilege;
+import com.cloudera.impala.common.AnalysisException;
+import com.cloudera.impala.thrift.TAlterTableDropPartitionParams;
+import com.cloudera.impala.thrift.TAlterTableParams;
+import com.cloudera.impala.thrift.TAlterTableType;
+import com.google.common.base.Preconditions;
+
+/**
+ * Represents an ALTER TABLE DROP PARTITION statement.
+ */
+public class AlterTableDropPartitionStmt extends AlterTableStmt {
+  private final boolean ifExists_;
+  private final PartitionSpec partitionSpec_;
+
+  // Setting this value causes dropped partition(s) to be permanently
+  // deleted. For example, for HDFS tables it skips the trash mechanism.
+  private final boolean purgePartition_;
+
+  public AlterTableDropPartitionStmt(TableName tableName,
+      PartitionSpec partitionSpec, boolean ifExists, boolean purgePartition) {
+    super(tableName);
+    Preconditions.checkNotNull(partitionSpec);
+    partitionSpec_ = partitionSpec;
+    partitionSpec_.setTableName(tableName);
+    ifExists_ = ifExists;
+    purgePartition_ = purgePartition;
+  }
+
+  public boolean getIfExists() { return ifExists_; }
+
+  @Override
+  public String toSql() {
+    StringBuilder sb = new StringBuilder("ALTER TABLE " + getTbl());
+    sb.append(" DROP ");
+    if (ifExists_) sb.append("IF EXISTS ");
+    sb.append(" DROP " + partitionSpec_.toSql());
+    if (purgePartition_) sb.append(" PURGE");
+    return sb.toString();
+  }
+
+  @Override
+  public TAlterTableParams toThrift() {
+    TAlterTableParams params = super.toThrift();
+    params.setAlter_type(TAlterTableType.DROP_PARTITION);
+    TAlterTableDropPartitionParams addPartParams = new TAlterTableDropPartitionParams();
+    addPartParams.setPartition_spec(partitionSpec_.toThrift());
+    addPartParams.setIf_exists(ifExists_);
+    addPartParams.setPurge(purgePartition_);
+    params.setDrop_partition_params(addPartParams);
+    return params;
+  }
+
+  @Override
+  public void analyze(Analyzer analyzer) throws AnalysisException {
+    super.analyze(analyzer);
+    if (!ifExists_) partitionSpec_.setPartitionShouldExist();
+    partitionSpec_.setPrivilegeRequirement(Privilege.ALTER);
+    partitionSpec_.analyze(analyzer);
+  }
+}

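With the duplicated " DROP " removed from toSql() above, the generated statement takes the shape shown below. This is a hypothetical standalone rewrite for illustration, not the class method itself:

    // Sketch only: the SQL shape produced by the corrected toSql().
    public class DropPartitionToSqlSketch {
      static String toSql(String tbl, String partitionSpecSql,
          boolean ifExists, boolean purge) {
        StringBuilder sb = new StringBuilder("ALTER TABLE " + tbl);
        sb.append(" DROP ");
        if (ifExists) sb.append("IF EXISTS ");
        sb.append(partitionSpecSql);
        if (purge) sb.append(" PURGE");
        return sb.toString();
      }

      public static void main(String[] args) {
        // Prints: ALTER TABLE t DROP IF EXISTS PARTITION (p=1) PURGE
        System.out.println(toSql("t", "PARTITION (p=1)", true, true));
      }
    }
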
http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/org/apache/impala/analysis/AlterTableOrViewRenameStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/AlterTableOrViewRenameStmt.java b/fe/src/main/java/org/apache/impala/analysis/AlterTableOrViewRenameStmt.java
new file mode 100644
index 0000000..009535c
--- /dev/null
+++ b/fe/src/main/java/org/apache/impala/analysis/AlterTableOrViewRenameStmt.java
@@ -0,0 +1,90 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package com.cloudera.impala.analysis;
+
+import com.cloudera.impala.authorization.Privilege;
+import com.cloudera.impala.catalog.View;
+import com.cloudera.impala.common.AnalysisException;
+import com.cloudera.impala.thrift.TAccessEvent;
+import com.cloudera.impala.thrift.TAlterTableOrViewRenameParams;
+import com.cloudera.impala.thrift.TAlterTableParams;
+import com.cloudera.impala.thrift.TAlterTableType;
+import com.cloudera.impala.thrift.TTableName;
+import com.google.common.base.Preconditions;
+
+/**
+ * Represents an ALTER TABLE/VIEW RENAME statement.
+ */
+public class AlterTableOrViewRenameStmt extends AlterTableStmt {
+  protected final TableName newTableName_;
+
+  // Set during analysis
+  protected String newDbName_;
+
+  // True if we are renaming a table, false if we are renaming a view.
+  protected final boolean renameTable_;
+
+  public AlterTableOrViewRenameStmt(TableName oldTableName, TableName newTableName,
+      boolean renameTable) {
+    super(oldTableName);
+    Preconditions.checkState(newTableName != null && !newTableName.isEmpty());
+    newTableName_ = newTableName;
+    renameTable_ = renameTable;
+  }
+
+  public String getNewTbl() {
+    return newTableName_.getTbl();
+  }
+
+  public String getNewDb() {
+    Preconditions.checkNotNull(newDbName_);
+    return newDbName_;
+  }
+
+  @Override
+  public TAlterTableParams toThrift() {
+    TAlterTableParams params = super.toThrift();
+    params.setAlter_type(
+        (renameTable_) ? TAlterTableType.RENAME_TABLE : TAlterTableType.RENAME_VIEW);
+    TAlterTableOrViewRenameParams renameParams =
+        new TAlterTableOrViewRenameParams(new TTableName(getNewDb(), getNewTbl()));
+    params.setRename_params(renameParams);
+    return params;
+  }
+
+  @Override
+  public void analyze(Analyzer analyzer) throws AnalysisException {
+    newTableName_.analyze();
+    table_ = analyzer.getTable(tableName_, Privilege.ALTER);
+    if (table_ instanceof View && renameTable_) {
+      throw new AnalysisException(String.format(
+          "ALTER TABLE not allowed on a view: %s", table_.getFullName()));
+    }
+    if (!(table_ instanceof View) && !renameTable_) {
+      throw new AnalysisException(String.format(
+          "ALTER VIEW not allowed on a table: %s", table_.getFullName()));
+    }
+    newDbName_ = analyzer.getTargetDbName(newTableName_);
+    if (analyzer.dbContainsTable(newDbName_, newTableName_.getTbl(), Privilege.CREATE)) {
+      throw new AnalysisException(Analyzer.TBL_ALREADY_EXISTS_ERROR_MSG +
+          String.format("%s.%s", newDbName_, getNewTbl()));
+    }
+    analyzer.addAccessEvent(new TAccessEvent(newDbName_ + "." + newTableName_.getTbl(),
+        table_.getCatalogObjectType(), Privilege.CREATE.toString()));
+  }
+}

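The two guards at the top of analyze() above enforce the statement/target pairing: ALTER TABLE RENAME must target a table and ALTER VIEW RENAME must target a view. A compact sketch of that dispatch (check and its flags are hypothetical names):

    // Sketch only: the table/view dispatch rule in
    // AlterTableOrViewRenameStmt.analyze().
    public class RenameDispatchSketch {
      static void check(boolean targetIsView, boolean renameTable) {
        if (targetIsView && renameTable) {
          throw new IllegalArgumentException("ALTER TABLE not allowed on a view");
        }
        if (!targetIsView && !renameTable) {
          throw new IllegalArgumentException("ALTER VIEW not allowed on a table");
        }
      }

      public static void main(String[] args) {
        check(false, true);  // ALTER TABLE on a table: ok
        check(true, false);  // ALTER VIEW on a view: ok
        System.out.println("dispatch checks passed");
      }
    }
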
http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/org/apache/impala/analysis/AlterTableRecoverPartitionsStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/AlterTableRecoverPartitionsStmt.java b/fe/src/main/java/org/apache/impala/analysis/AlterTableRecoverPartitionsStmt.java
new file mode 100644
index 0000000..c7e796c
--- /dev/null
+++ b/fe/src/main/java/org/apache/impala/analysis/AlterTableRecoverPartitionsStmt.java
@@ -0,0 +1,56 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package com.cloudera.impala.analysis;
+
+import com.cloudera.impala.catalog.HdfsTable;
+import com.cloudera.impala.common.AnalysisException;
+import com.cloudera.impala.thrift.TAlterTableParams;
+import com.cloudera.impala.thrift.TAlterTableType;
+
+/**
+ * Represents an ALTER TABLE RECOVER PARTITIONS statement.
+ */
+public class AlterTableRecoverPartitionsStmt extends AlterTableStmt {
+
+  public AlterTableRecoverPartitionsStmt(TableName tableName) {
+    super(tableName);
+  }
+
+  @Override
+  public TAlterTableParams toThrift() {
+    TAlterTableParams params = super.toThrift();
+    params.setAlter_type(TAlterTableType.RECOVER_PARTITIONS);
+    return params;
+  }
+
+  @Override
+  public void analyze(Analyzer analyzer) throws AnalysisException {
+    super.analyze(analyzer);
+
+    // Make sure the target table is HdfsTable.
+    if (!(table_ instanceof HdfsTable)) {
+      throw new AnalysisException("ALTER TABLE RECOVER PARTITIONS " +
+          "must target an HDFS table: " + tableName_);
+    }
+
+    // Make sure the target table is partitioned.
+    if (table_.getMetaStoreTable().getPartitionKeysSize() == 0) {
+      throw new AnalysisException("Table is not partitioned: " + tableName_);
+    }
+  }
+}

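analyze() above imposes exactly two preconditions before RECOVER PARTITIONS can proceed: the target must be an HDFS table, and it must have at least one partition key. Sketched standalone under hypothetical names:

    // Sketch only: the two preconditions in
    // AlterTableRecoverPartitionsStmt.analyze().
    public class RecoverPartitionsPreconditionSketch {
      static void check(boolean isHdfsTable, int partitionKeys) {
        if (!isHdfsTable) {
          throw new IllegalArgumentException(
              "ALTER TABLE RECOVER PARTITIONS must target an HDFS table");
        }
        if (partitionKeys == 0) {
          throw new IllegalArgumentException("Table is not partitioned");
        }
      }

      public static void main(String[] args) {
        check(true, 2);  // ok: partitioned HDFS table
        System.out.println("preconditions passed");
      }
    }
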
http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/org/apache/impala/analysis/AlterTableSetCachedStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/AlterTableSetCachedStmt.java b/fe/src/main/java/org/apache/impala/analysis/AlterTableSetCachedStmt.java
new file mode 100644
index 0000000..f63cf3e
--- /dev/null
+++ b/fe/src/main/java/org/apache/impala/analysis/AlterTableSetCachedStmt.java
@@ -0,0 +1,92 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package com.cloudera.impala.analysis;
+
+import com.cloudera.impala.catalog.HdfsPartition;
+import com.cloudera.impala.catalog.HdfsTable;
+import com.cloudera.impala.catalog.Table;
+import com.cloudera.impala.common.AnalysisException;
+import com.cloudera.impala.thrift.TAlterTableParams;
+import com.cloudera.impala.thrift.TAlterTableSetCachedParams;
+import com.cloudera.impala.thrift.TAlterTableType;
+import com.google.common.base.Preconditions;
+
+/**
+ * Represents an ALTER TABLE [PARTITION partitionSpec] SET [UNCACHED|CACHED 'pool'].
+ */
+public class AlterTableSetCachedStmt extends AlterTableSetStmt {
+  private final HdfsCachingOp cacheOp_;
+
+  public AlterTableSetCachedStmt(TableName tableName,
+      PartitionSpec partitionSpec, HdfsCachingOp cacheOp) {
+    super(tableName, partitionSpec);
+    Preconditions.checkNotNull(cacheOp);
+    cacheOp_ = cacheOp;
+  }
+
+  @Override
+  public TAlterTableParams toThrift() {
+    TAlterTableParams params = super.toThrift();
+    params.setAlter_type(TAlterTableType.SET_CACHED);
+    TAlterTableSetCachedParams cachingParams =
+        new TAlterTableSetCachedParams();
+    if (getPartitionSpec() != null) {
+      cachingParams.setPartition_spec(getPartitionSpec().toThrift());
+    }
+    cachingParams.setCache_op(cacheOp_.toThrift());
+    params.setSet_cached_params(cachingParams);
+    return params;
+  }
+
+  @Override
+  public void analyze(Analyzer analyzer) throws AnalysisException {
+    super.analyze(analyzer);
+    cacheOp_.analyze(analyzer);
+
+    Table table = getTargetTable();
+    Preconditions.checkNotNull(table);
+    if (!(table instanceof HdfsTable)) {
+      throw new AnalysisException("ALTER TABLE SET [CACHED|UNCACHED] must target an " +
+          "HDFS table: " + table.getFullName());
+    }
+
+    if (cacheOp_.shouldCache()) {
+      boolean isCacheable;
+      PartitionSpec partSpec = getPartitionSpec();
+      HdfsTable hdfsTable = (HdfsTable)table;
+      StringBuilder nameSb = new StringBuilder();
+      if (partSpec != null) {
+        HdfsPartition part = hdfsTable.getPartition(partSpec.getPartitionSpecKeyValues());
+        if (part == null) {
+          throw new AnalysisException("Partition spec does not exist: " +
+              partSpec.toSql());
+        }
+        isCacheable = part.isCacheable();
+        nameSb.append("Partition (" + part.getPartitionName() + ")");
+      } else {
+        isCacheable = hdfsTable.isCacheable();
+        nameSb.append("Table ").append(table.getFullName());
+      }
+      if (!isCacheable) {
+        throw new AnalysisException(nameSb.toString() + " cannot be cached. Please " +
+            "check if the table or partitions are on a filesystem which supports " +
+            "caching.");
+      }
+    }
+  }
+}
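
Finally, cacheability in the analyze() above is resolved at partition granularity when a PARTITION clause is present, and at table granularity otherwise. A minimal sketch of that fallback, with a nullable Boolean standing in for the optional HdfsPartition lookup:

    // Sketch only: the cacheability resolution in
    // AlterTableSetCachedStmt.analyze(); names are hypothetical.
    public class SetCachedResolutionSketch {
      static boolean isCacheable(Boolean partitionCacheable, boolean tableCacheable) {
        // partitionCacheable is null when no PARTITION clause was
        // given, so the table's own location decides.
        return (partitionCacheable != null) ? partitionCacheable : tableCacheable;
      }

      public static void main(String[] args) {
        System.out.println(isCacheable(null, true));           // table-level: true
        System.out.println(isCacheable(Boolean.FALSE, true));  // partition wins: false
      }
    }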