Posted to commits@impala.apache.org by kw...@apache.org on 2016/09/30 02:15:07 UTC

[50/61] [partial] incubator-impala git commit: IMPALA-3786: Replace "cloudera" with "apache" (part 1)

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/ext-data-source/test/src/main/java/org/apache/impala/extdatasource/AllTypesDataSource.java
----------------------------------------------------------------------
diff --git a/ext-data-source/test/src/main/java/org/apache/impala/extdatasource/AllTypesDataSource.java b/ext-data-source/test/src/main/java/org/apache/impala/extdatasource/AllTypesDataSource.java
new file mode 100644
index 0000000..a6a731b
--- /dev/null
+++ b/ext-data-source/test/src/main/java/org/apache/impala/extdatasource/AllTypesDataSource.java
@@ -0,0 +1,323 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package com.cloudera.impala.extdatasource;
+
+import java.math.BigDecimal;
+import java.math.BigInteger;
+import java.sql.Timestamp;
+import java.util.List;
+import java.util.UUID;
+
+import com.cloudera.impala.extdatasource.thrift.TBinaryPredicate;
+import com.cloudera.impala.extdatasource.thrift.TCloseParams;
+import com.cloudera.impala.extdatasource.thrift.TCloseResult;
+import com.cloudera.impala.extdatasource.thrift.TColumnDesc;
+import com.cloudera.impala.extdatasource.thrift.TGetNextParams;
+import com.cloudera.impala.extdatasource.thrift.TGetNextResult;
+import com.cloudera.impala.extdatasource.thrift.TOpenParams;
+import com.cloudera.impala.extdatasource.thrift.TOpenResult;
+import com.cloudera.impala.extdatasource.thrift.TPrepareParams;
+import com.cloudera.impala.extdatasource.thrift.TPrepareResult;
+import com.cloudera.impala.extdatasource.thrift.TRowBatch;
+import com.cloudera.impala.extdatasource.thrift.TTableSchema;
+import com.cloudera.impala.extdatasource.util.SerializationUtils;
+import com.cloudera.impala.extdatasource.v1.ExternalDataSource;
+import com.cloudera.impala.thrift.TColumnData;
+import com.cloudera.impala.thrift.TColumnType;
+import com.cloudera.impala.thrift.TPrimitiveType;
+import com.cloudera.impala.thrift.TScalarType;
+import com.cloudera.impala.thrift.TStatus;
+import com.cloudera.impala.thrift.TErrorCode;
+import com.cloudera.impala.thrift.TTypeNodeType;
+import com.google.common.base.Preconditions;
+import com.google.common.collect.Iterables;
+import com.google.common.collect.Lists;
+
+/**
+ * Data source implementation for tests that can:
+ * (a) Accept every other offered conjunct to exercise planning (though predicates
+ *     are not actually evaluated) and return trivial data of all supported types
+ *     for query tests.
+ * (b) Validate the predicates offered by Impala.
+ */
+public class AllTypesDataSource implements ExternalDataSource {
+  // Total number of rows to return
+  private static final int NUM_ROWS_RETURNED = 5000;
+
+  // The size of returned batches starts at INITIAL_BATCH_SIZE and grows by
+  // BATCH_SIZE_INCREMENT on every getNext() call.
+  private static final int INITIAL_BATCH_SIZE = 500;
+  private static final int BATCH_SIZE_INCREMENT = 100;
+
+  private static final TStatus STATUS_OK =
+      new TStatus(TErrorCode.OK, Lists.<String>newArrayList());
+
+  private int currRow_;
+  private boolean eos_;
+  private int batchSize_;
+  private TTableSchema schema_;
+  private DataSourceState state_;
+  private String scanHandle_;
+  private String validatePredicatesResult_;
+
+  // Enumerates the states of the data source.
+  private enum DataSourceState {
+    CREATED,
+    OPENED,
+    CLOSED
+  }
+
+  public AllTypesDataSource() {
+    eos_ = false;
+    currRow_ = 0;
+    state_ = DataSourceState.CREATED;
+  }
+
+  /**
+   * Accepts every other conjunct and returns an estimate of the constant number
+   * of rows that will be returned.
+   */
+  @Override
+  public TPrepareResult prepare(TPrepareParams params) {
+    Preconditions.checkState(state_ == DataSourceState.CREATED);
+    List<Integer> accepted = Lists.newArrayList();
+    int numRowsReturned = 0;
+    if (validatePredicates(params.getPredicates())) {
+      // Indicate that all predicates are applied because we later return a dummy
+      // row containing the validation result for tests. Impala shouldn't try to
+      // apply predicates to that dummy row.
+      for (int i = 0; i < params.getPredicatesSize(); ++i) accepted.add(i);
+      numRowsReturned = 1;
+    } else {
+      // Default behavior is to accept every other predicate. They are not actually
+      // applied, but we want to validate that Impala applies the correct predicates.
+      for (int i = 0; i < params.getPredicatesSize(); ++i) {
+        if (i % 2 == 0) accepted.add(i);
+      }
+      numRowsReturned = NUM_ROWS_RETURNED;
+    }
+    return new TPrepareResult(STATUS_OK)
+      .setAccepted_conjuncts(accepted)
+      .setNum_rows_estimate(numRowsReturned);
+  }
+
+  /**
+   * If the predicate value (assuming STRING) starts with 'VALIDATE_PREDICATES##',
+   * we validate the TPrepareParams.predicates against predicates specified after the
+   * 'VALIDATE_PREDICATES##' and return true. The result of the validation is stored
+   * in validatePredicatesResult_.
+   *
+   * The expected predicates are specified in the form "{slot} {TComparisonOp} {val}",
+   * and conjunctive predicates are separated by '&&'.
+   *
+   * For example, the predicates_spec validates the predicates in the following query:
+   *    select * from table_name
+   *    where predicates_spec = 'x LT 1 && y GT 2' and
+   *          x < 1 and
+   *          2 > y;
+   *
+   * Current limitations:
+   *  - Disjunctive predicates are not supported (e.g. "expr1 or expr2")
+   *  - Only INT comparison values are supported
+   */
+  private boolean validatePredicates(List<List<TBinaryPredicate>> predicates) {
+    if (predicates == null || predicates.isEmpty()) return false;
+    TBinaryPredicate firstPredicate = predicates.get(0).get(0);
+    if (!firstPredicate.getValue().isSetString_val()) return false;
+    String colVal = firstPredicate.getValue().getString_val();
+    if (!colVal.toUpperCase().startsWith("VALIDATE_PREDICATES##")) return false;
+
+    String[] colValParts = colVal.split("##");
+    Preconditions.checkArgument(colValParts.length == 2);
+    String[] expectedPredicates = colValParts[1].split("&&");
+    Preconditions.checkArgument(expectedPredicates.length == predicates.size() - 1);
+
+    String result = "SUCCESS";
+    for (int i = 1; i < predicates.size(); ++i) {
+      String[] predicateParts = expectedPredicates[i - 1].trim().split(" ");
+      Preconditions.checkArgument(predicateParts.length == 3);
+      TBinaryPredicate predicate =
+          Iterables.getOnlyElement(predicates.get(i));
+      Preconditions.checkArgument(predicate.getValue().isSetInt_val());
+
+      String slotName = predicate.getCol().getName().toUpperCase();
+      int intVal = predicate.getValue().getInt_val();
+      if (!predicateParts[0].toUpperCase().equals(slotName) ||
+          !predicateParts[1].toUpperCase().equals(predicate.getOp().name()) ||
+          !predicateParts[2].equals(Integer.toString(intVal))) {
+        result = "Failed predicate, expected=" + expectedPredicates[i - 1].trim() +
+            " actual=" + predicate.toString();
+      }
+    }
+    validatePredicatesResult_ = result;
+    return true;
+  }
+
+  /**
+   * Initializes the batch size and stores the table schema.
+   */
+  @Override
+  public TOpenResult open(TOpenParams params) {
+    Preconditions.checkState(state_ == DataSourceState.CREATED);
+    state_ = DataSourceState.OPENED;
+    batchSize_ = INITIAL_BATCH_SIZE;
+    schema_ = params.getRow_schema();
+    // Need to check validatePredicates again because the call in Prepare() was from
+    // the frontend and used a different instance of this data source class.
+    if (validatePredicates(params.getPredicates())) {
+      // If validating predicates, only one STRING column should be selected.
+      Preconditions.checkArgument(schema_.getColsSize() == 1);
+      TColumnDesc firstCol = schema_.getCols().get(0);
+      TColumnType firstType = firstCol.getType();
+      Preconditions.checkState(firstType.getTypesSize() == 1);
+      Preconditions.checkState(firstType.types.get(0).getType() == TTypeNodeType.SCALAR);
+      Preconditions.checkArgument(
+          firstType.types.get(0).scalar_type.getType() == TPrimitiveType.STRING);
+    }
+    scanHandle_ = UUID.randomUUID().toString();
+    return new TOpenResult(STATUS_OK).setScan_handle(scanHandle_);
+  }
+
+  /**
+   * If validating predicates, returns a single row with the result of the validation.
+   * Otherwise returns row batches with generated rows based on the row index. Called
+   * multiple times, so the current row is stored between calls. Each row batch is a
+   * different size (not necessarily the size specified by TOpenParams.batch_size) to
+   * ensure that Impala can handle unexpected batch sizes.
+   */
+  @Override
+  public TGetNextResult getNext(TGetNextParams params) {
+    Preconditions.checkState(state_ == DataSourceState.OPENED);
+    Preconditions.checkArgument(params.getScan_handle().equals(scanHandle_));
+    if (eos_) return new TGetNextResult(STATUS_OK).setEos(eos_);
+
+    if (validatePredicatesResult_ != null) {
+      TColumnData colData = new TColumnData();
+      colData.setIs_null(Lists.newArrayList(false));
+      colData.setString_vals(Lists.newArrayList(validatePredicatesResult_));
+      eos_ = true;
+      return new TGetNextResult(STATUS_OK).setEos(eos_)
+          .setRows(new TRowBatch().setCols(Lists.newArrayList(colData)).setNum_rows(1));
+    }
+
+    List<TColumnData> cols = Lists.newArrayList();
+    for (int i = 0; i < schema_.getColsSize(); ++i) {
+      cols.add(new TColumnData().setIs_null(Lists.<Boolean>newArrayList()));
+    }
+
+    int numAdded = 0;
+    while (currRow_ < NUM_ROWS_RETURNED && numAdded < batchSize_) {
+      addRow(cols);
+      ++numAdded;
+      ++currRow_;
+    }
+
+    batchSize_ += BATCH_SIZE_INCREMENT;
+    if (currRow_ == NUM_ROWS_RETURNED) eos_ = true;
+    return new TGetNextResult(STATUS_OK).setEos(eos_)
+        .setRows(new TRowBatch().setCols(cols).setNum_rows(numAdded));
+  }
+
+  /**
+   * Adds a row to the set of columns. For all numeric types the value is set to the
+   * row index (mod a small per-type limit for the narrower integer types). For
+   * strings it is a string containing the row index, and every 5th value is NULL.
+   */
+  private void addRow(List<TColumnData> cols) {
+    for (int i = 0; i < cols.size(); ++i) {
+      TColumnDesc colDesc = schema_.getCols().get(i);
+      TColumnData colData = cols.get(i);
+      TColumnType type = colDesc.getType();
+      if (type.types.get(0).getType() != TTypeNodeType.SCALAR) {
+        // Unsupported non-scalar type.
+        throw new UnsupportedOperationException("Unsupported column type: " +
+            type.types.get(0).getType());
+      }
+      Preconditions.checkState(type.getTypesSize() == 1);
+      TScalarType scalarType = type.types.get(0).scalar_type;
+      switch (scalarType.type) {
+        case TINYINT:
+          colData.addToIs_null(false);
+          colData.addToByte_vals((byte) (currRow_ % 10));
+          break;
+        case SMALLINT:
+          colData.addToIs_null(false);
+          colData.addToShort_vals((short) (currRow_ % 100));
+          break;
+        case INT:
+          colData.addToIs_null(false);
+          colData.addToInt_vals(currRow_);
+          break;
+        case BIGINT:
+          colData.addToIs_null(false);
+          colData.addToLong_vals((long) currRow_ * 10);
+          break;
+        case DOUBLE:
+          colData.addToIs_null(false);
+          colData.addToDouble_vals(currRow_);
+          break;
+        case FLOAT:
+          colData.addToIs_null(false);
+          colData.addToDouble_vals((float) (1.1 * currRow_));
+          break;
+        case STRING:
+          if (currRow_ % 5 == 0) {
+            colData.addToIs_null(true);
+          } else {
+            colData.addToIs_null(false);
+            colData.addToString_vals(String.valueOf(currRow_));
+          }
+          break;
+        case BOOLEAN:
+          colData.addToIs_null(false);
+          colData.addToBool_vals(currRow_ % 2 == 0);
+          break;
+        case TIMESTAMP:
+          colData.addToIs_null(false);
+          colData.addToBinary_vals(
+            SerializationUtils.encodeTimestamp(new Timestamp(currRow_)));
+          break;
+        case DECIMAL:
+          colData.addToIs_null(false);
+          BigInteger maxUnscaled = BigInteger.TEN.pow(scalarType.getPrecision());
+          BigInteger val = maxUnscaled.subtract(BigInteger.valueOf(currRow_ + 1));
+          val = val.mod(maxUnscaled);
+          if (currRow_ % 2 == 0) val = val.negate();
+          colData.addToBinary_vals(SerializationUtils.encodeDecimal(new BigDecimal(val)));
+          break;
+        case BINARY:
+        case CHAR:
+        case DATE:
+        case DATETIME:
+        case INVALID_TYPE:
+        case NULL_TYPE:
+        default:
+          // Unsupported.
+          throw new UnsupportedOperationException("Unsupported column type: " +
+              scalarType.getType());
+      }
+    }
+  }
+
+  @Override
+  public TCloseResult close(TCloseParams params) {
+    Preconditions.checkState(state_ == DataSourceState.OPENED);
+    Preconditions.checkArgument(params.getScan_handle().equals(scanHandle_));
+    state_ = DataSourceState.CLOSED;
+    return new TCloseResult(STATUS_OK);
+  }
+}

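The predicate-validation convention used by AllTypesDataSource above boils down to string parsing: the first predicate's STRING value carries a spec of the form "VALIDATE_PREDICATES##{slot} {TComparisonOp} {val} && ...". A minimal, self-contained sketch of that parsing, using plain strings instead of the Thrift TBinaryPredicate structs (the class name here is hypothetical):

// Hypothetical demo of the spec format parsed by validatePredicates() above.
public class PredicateSpecDemo {
  public static void main(String[] args) {
    String colVal = "VALIDATE_PREDICATES##x LT 1 && y GT 2";
    String[] colValParts = colVal.split("##");            // [marker, spec]
    for (String expected : colValParts[1].split("&&")) {
      String[] parts = expected.trim().split(" ");        // [slot, op, value]
      System.out.println("slot=" + parts[0] + " op=" + parts[1]
          + " value=" + parts[2]);
    }
  }
}

Running it prints "slot=x op=LT value=1" and "slot=y op=GT value=2", matching how validatePredicates() compares each expected term against the TBinaryPredicate conjuncts offered by Impala.
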
http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/analysis/AggregateInfo.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/analysis/AggregateInfo.java b/fe/src/main/java/com/cloudera/impala/analysis/AggregateInfo.java
deleted file mode 100644
index e8e9445..0000000
--- a/fe/src/main/java/com/cloudera/impala/analysis/AggregateInfo.java
+++ /dev/null
@@ -1,742 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.analysis;
-
-import java.util.ArrayList;
-import java.util.List;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.cloudera.impala.catalog.Type;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.common.InternalException;
-import com.cloudera.impala.planner.DataPartition;
-import com.cloudera.impala.thrift.TPartitionType;
-import com.google.common.base.Objects;
-import com.google.common.base.Preconditions;
-import com.google.common.collect.Lists;
-
-/**
- * Encapsulates all the information needed to compute the aggregate functions of a single
- * Select block, including a possible 2nd phase aggregation step for DISTINCT aggregate
- * functions and merge aggregation steps needed for distributed execution.
- *
- * The latter requires a tree structure of AggregateInfo objects which express the
- * original aggregate computations as well as the necessary merging aggregate
- * computations.
- * TODO: get rid of this by transforming
- *   SELECT COUNT(DISTINCT a, b, ..) GROUP BY x, y, ...
- * into an equivalent query with an inline view:
- *   SELECT COUNT(*) FROM (SELECT DISTINCT a, b, ..., x, y, ...) GROUP BY x, y, ...
- *
- * The tree structure looks as follows:
- * - for non-distinct aggregation:
- *   - aggInfo: contains the original aggregation functions and grouping exprs
- *   - aggInfo.mergeAggInfo: contains the merging aggregation functions (grouping
- *     exprs are identical)
- * - for distinct aggregation (for an explanation of the phases, see
- *   SelectStmt.createDistinctAggInfo()):
- *   - aggInfo: contains the phase 1 aggregate functions and grouping exprs
- *   - aggInfo.2ndPhaseDistinctAggInfo: contains the phase 2 aggregate functions and
- *     grouping exprs
- *   - aggInfo.mergeAggInfo: contains the merging aggregate functions for the phase 1
- *     computation (grouping exprs are identical)
- *   - aggInfo.2ndPhaseDistinctAggInfo.mergeAggInfo: contains the merging aggregate
- *     functions for the phase 2 computation (grouping exprs are identical)
- *
- * In general, merging aggregate computations are idempotent; in other words,
- * aggInfo.mergeAggInfo == aggInfo.mergeAggInfo.mergeAggInfo.
- *
- * TODO: move the merge construction logic from SelectStmt into AggregateInfo
- * TODO: Add query tests for aggregation with intermediate tuples with num_nodes=1.
- */
-public class AggregateInfo extends AggregateInfoBase {
-  private final static Logger LOG = LoggerFactory.getLogger(AggregateInfo.class);
-
-  public enum AggPhase {
-    FIRST,
-    FIRST_MERGE,
-    SECOND,
-    SECOND_MERGE;
-
-    public boolean isMerge() { return this == FIRST_MERGE || this == SECOND_MERGE; }
-  };
-
-  // created by createMergeAggInfo()
-  private AggregateInfo mergeAggInfo_;
-
-  // created by createDistinctAggInfo()
-  private AggregateInfo secondPhaseDistinctAggInfo_;
-
-  private final AggPhase aggPhase_;
-
-  // Map from all grouping and aggregate exprs to a SlotRef referencing the corresp. slot
-  // in the intermediate tuple. Identical to outputTupleSmap_ if no aggregateExpr has an
-  // output type that is different from its intermediate type.
-  protected ExprSubstitutionMap intermediateTupleSmap_ = new ExprSubstitutionMap();
-
-  // Map from all grouping and aggregate exprs to a SlotRef referencing the corresp. slot
-  // in the output tuple.
-  protected ExprSubstitutionMap outputTupleSmap_ = new ExprSubstitutionMap();
-
-  // Map from slots of outputTupleSmap_ to the corresponding slot in
-  // intermediateTupleSmap_.
-  protected ExprSubstitutionMap outputToIntermediateTupleSmap_ =
-      new ExprSubstitutionMap();
-
-  // if set, a subset of groupingExprs_; set and used during planning
-  private List<Expr> partitionExprs_;
-
-  // C'tor creates copies of groupingExprs and aggExprs.
-  private AggregateInfo(ArrayList<Expr> groupingExprs,
-      ArrayList<FunctionCallExpr> aggExprs, AggPhase aggPhase)  {
-    super(groupingExprs, aggExprs);
-    aggPhase_ = aggPhase;
-  }
-
-  /**
-   * C'tor for cloning.
-   */
-  private AggregateInfo(AggregateInfo other) {
-    super(other);
-    if (other.mergeAggInfo_ != null) {
-      mergeAggInfo_ = other.mergeAggInfo_.clone();
-    }
-    if (other.secondPhaseDistinctAggInfo_ != null) {
-      secondPhaseDistinctAggInfo_ = other.secondPhaseDistinctAggInfo_.clone();
-    }
-    aggPhase_ = other.aggPhase_;
-    outputTupleSmap_ = other.outputTupleSmap_.clone();
-    if (other.requiresIntermediateTuple()) {
-      intermediateTupleSmap_ = other.intermediateTupleSmap_.clone();
-    } else {
-      Preconditions.checkState(other.intermediateTupleDesc_ == other.outputTupleDesc_);
-      intermediateTupleSmap_ = outputTupleSmap_;
-    }
-    partitionExprs_ =
-        (other.partitionExprs_ != null) ? Expr.cloneList(other.partitionExprs_) : null;
-  }
-
-  public List<Expr> getPartitionExprs() { return partitionExprs_; }
-  public void setPartitionExprs(List<Expr> exprs) { partitionExprs_ = exprs; }
-
-  /**
-   * Creates complete AggregateInfo for groupingExprs and aggExprs, including
-   * aggTupleDesc and aggTupleSMap. If parameter tupleDesc != null, sets aggTupleDesc to
-   * that instead of creating a new descriptor (after verifying that the passed-in
-   * descriptor is correct for the given aggregation).
-   * Also creates mergeAggInfo and secondPhaseDistinctAggInfo, if needed.
-   * If an aggTupleDesc is created, also registers eq predicates between the
-   * grouping exprs and their respective slots with 'analyzer'.
-   */
-  static public AggregateInfo create(
-      ArrayList<Expr> groupingExprs, ArrayList<FunctionCallExpr> aggExprs,
-      TupleDescriptor tupleDesc, Analyzer analyzer)
-          throws AnalysisException {
-    Preconditions.checkState(
-        (groupingExprs != null && !groupingExprs.isEmpty())
-        || (aggExprs != null && !aggExprs.isEmpty()));
-    Expr.removeDuplicates(groupingExprs);
-    Expr.removeDuplicates(aggExprs);
-    AggregateInfo result = new AggregateInfo(groupingExprs, aggExprs, AggPhase.FIRST);
-
-    // collect agg exprs with DISTINCT clause
-    ArrayList<FunctionCallExpr> distinctAggExprs = Lists.newArrayList();
-    if (aggExprs != null) {
-      for (FunctionCallExpr aggExpr: aggExprs) {
-        if (aggExpr.isDistinct()) distinctAggExprs.add(aggExpr);
-      }
-    }
-
-    if (distinctAggExprs.isEmpty()) {
-      if (tupleDesc == null) {
-        result.createTupleDescs(analyzer);
-        result.createSmaps(analyzer);
-      } else {
-        // A tupleDesc should only be given for UNION DISTINCT.
-        Preconditions.checkState(aggExprs == null);
-        result.outputTupleDesc_ = tupleDesc;
-        result.intermediateTupleDesc_ = tupleDesc;
-      }
-      result.createMergeAggInfo(analyzer);
-    } else {
-      // we don't allow you to pass in a descriptor for distinct aggregation
-      // (we need two descriptors)
-      Preconditions.checkState(tupleDesc == null);
-      result.createDistinctAggInfo(groupingExprs, distinctAggExprs, analyzer);
-    }
-    LOG.debug("agg info:\n" + result.debugString());
-    return result;
-  }
-
-  /**
-   * Create aggregate info for select block containing aggregate exprs with
-   * DISTINCT clause.
-   * This creates:
-   * - aggTupleDesc
-   * - a complete secondPhaseDistinctAggInfo
-   * - mergeAggInfo
-   *
-   * At the moment, we require that all distinct aggregate
- * functions be applied to the same set of exprs (i.e., we can't do something
- * like SELECT COUNT(DISTINCT id), COUNT(DISTINCT address)).
- * Aggregation happens in two successive phases:
- * - the first phase aggregates by all grouping exprs plus all parameter exprs
- *   of DISTINCT aggregate functions
- * - the second phase aggregates by the original grouping exprs, applies the
- *   DISTINCT aggregate functions to the extra grouping slots added in the first
- *   phase, and merges the remaining aggregate functions
-   *
-   * Example:
-   *   SELECT a, COUNT(DISTINCT b, c), MIN(d), COUNT(*) FROM T GROUP BY a
-   * - 1st phase grouping exprs: a, b, c
-   * - 1st phase agg exprs: MIN(d), COUNT(*)
-   * - 2nd phase grouping exprs: a
-   * - 2nd phase agg exprs: COUNT(*), MIN(<MIN(d) from 1st phase>),
-   *     SUM(<COUNT(*) from 1st phase>)
-   *
-   * TODO: expand implementation to cover the general case; this will require
-   * a different execution strategy
-   */
-  private void createDistinctAggInfo(
-      ArrayList<Expr> origGroupingExprs,
-      ArrayList<FunctionCallExpr> distinctAggExprs, Analyzer analyzer)
-          throws AnalysisException {
-    Preconditions.checkState(!distinctAggExprs.isEmpty());
-    // make sure that all DISTINCT params are the same;
-    // ignore top-level implicit casts in the comparison, we might have inserted
-    // those during analysis
-    ArrayList<Expr> expr0Children = Lists.newArrayList();
-
-    if (distinctAggExprs.get(0).getFnName().getFunction().equalsIgnoreCase(
-        "group_concat")) {
-      // Ignore separator parameter, otherwise the same would have to be present for all
-      // other distinct aggregates as well.
-      // TODO: Deal with constant exprs more generally, instead of special-casing
-      // group_concat().
-      expr0Children.add(distinctAggExprs.get(0).getChild(0).ignoreImplicitCast());
-    } else {
-      for (Expr expr : distinctAggExprs.get(0).getChildren()) {
-        expr0Children.add(expr.ignoreImplicitCast());
-      }
-    }
-    for (int i = 1; i < distinctAggExprs.size(); ++i) {
-      ArrayList<Expr> exprIChildren = Lists.newArrayList();
-      if (distinctAggExprs.get(i).getFnName().getFunction().equalsIgnoreCase(
-          "group_concat")) {
-        exprIChildren.add(distinctAggExprs.get(i).getChild(0).ignoreImplicitCast());
-      } else {
-        for (Expr expr : distinctAggExprs.get(i).getChildren()) {
-          exprIChildren.add(expr.ignoreImplicitCast());
-        }
-      }
-      if (!Expr.equalLists(expr0Children, exprIChildren)) {
-        throw new AnalysisException(
-            "all DISTINCT aggregate functions need to have the same set of "
-            + "parameters as " + distinctAggExprs.get(0).toSql()
-            + "; deviating function: " + distinctAggExprs.get(i).toSql());
-      }
-    }
-
-    // add DISTINCT parameters to grouping exprs
-    groupingExprs_.addAll(expr0Children);
-
-    // remove DISTINCT aggregate functions from aggExprs
-    aggregateExprs_.removeAll(distinctAggExprs);
-
-    createTupleDescs(analyzer);
-    createSmaps(analyzer);
-    createMergeAggInfo(analyzer);
-    createSecondPhaseAggInfo(origGroupingExprs, distinctAggExprs, analyzer);
-  }
-
-  public AggregateInfo getMergeAggInfo() { return mergeAggInfo_; }
-  public AggregateInfo getSecondPhaseDistinctAggInfo() {
-    return secondPhaseDistinctAggInfo_;
-  }
-  public AggPhase getAggPhase() { return aggPhase_; }
-  public boolean isMerge() { return aggPhase_.isMerge(); }
-  public boolean isDistinctAgg() { return secondPhaseDistinctAggInfo_ != null; }
-  public ExprSubstitutionMap getIntermediateSmap() { return intermediateTupleSmap_; }
-  public ExprSubstitutionMap getOutputSmap() { return outputTupleSmap_; }
-  public ExprSubstitutionMap getOutputToIntermediateSmap() {
-    return outputToIntermediateTupleSmap_;
-  }
-
-  public boolean hasAggregateExprs() {
-    return !aggregateExprs_.isEmpty() ||
-        (secondPhaseDistinctAggInfo_ != null &&
-         !secondPhaseDistinctAggInfo_.getAggregateExprs().isEmpty());
-  }
-
-  /**
-   * Return the tuple id produced in the final aggregation step.
-   */
-  public TupleId getResultTupleId() {
-    if (isDistinctAgg()) return secondPhaseDistinctAggInfo_.getOutputTupleId();
-    return getOutputTupleId();
-  }
-
-  public ArrayList<FunctionCallExpr> getMaterializedAggregateExprs() {
-    ArrayList<FunctionCallExpr> result = Lists.newArrayList();
-    for (Integer i: materializedSlots_) {
-      result.add(aggregateExprs_.get(i));
-    }
-    return result;
-  }
-
-  /**
-   * Append ids of all slots that are being referenced in the process
-   * of performing the aggregate computation described by this AggregateInfo.
-   */
-  public void getRefdSlots(List<SlotId> ids) {
-    Preconditions.checkState(outputTupleDesc_ != null);
-    if (groupingExprs_ != null) {
-      Expr.getIds(groupingExprs_, null, ids);
-    }
-    Expr.getIds(aggregateExprs_, null, ids);
-    // The backend assumes that the entire aggTupleDesc is materialized
-    for (int i = 0; i < outputTupleDesc_.getSlots().size(); ++i) {
-      ids.add(outputTupleDesc_.getSlots().get(i).getId());
-    }
-  }
-
-  /**
-   * Substitute all the expressions (grouping expr, aggregate expr) and update our
-   * substitution map according to the given substitution map:
-   * - smap typically maps from tuple t1 to tuple t2 (example: the smap of an
-   *   inline view maps the virtual table ref t1 into a base table ref t2)
-   * - our grouping and aggregate exprs need to be substituted with the given
-   *   smap so that they also reference t2
-   * - aggTupleSMap needs to be recomputed to map exprs based on t2
-   *   onto our aggTupleDesc (ie, the left-hand side needs to be substituted with
-   *   smap)
-   * - mergeAggInfo: this is not affected, because
-   *   * its grouping and aggregate exprs only reference aggTupleDesc_
-   *   * its smap is identical to aggTupleSMap_
-   * - 2ndPhaseDistinctAggInfo:
-   *   * its grouping and aggregate exprs also only reference aggTupleDesc_
-   *     and are therefore not affected
-   *   * its smap needs to be recomputed to map exprs based on t2 to its own
-   *     aggTupleDesc
-   */
-  public void substitute(ExprSubstitutionMap smap, Analyzer analyzer)
-      throws InternalException {
-    groupingExprs_ = Expr.substituteList(groupingExprs_, smap, analyzer, false);
-    LOG.trace("AggInfo: grouping_exprs=" + Expr.debugString(groupingExprs_));
-
-    // The smap in this case should not substitute the aggs themselves, only
-    // their subexpressions.
-    List<Expr> substitutedAggs =
-        Expr.substituteList(aggregateExprs_, smap, analyzer, false);
-    aggregateExprs_.clear();
-    for (Expr substitutedAgg: substitutedAggs) {
-      aggregateExprs_.add((FunctionCallExpr) substitutedAgg);
-    }
-
-    LOG.trace("AggInfo: agg_exprs=" + Expr.debugString(aggregateExprs_));
-    outputTupleSmap_.substituteLhs(smap, analyzer);
-    intermediateTupleSmap_.substituteLhs(smap, analyzer);
-    if (secondPhaseDistinctAggInfo_ != null) {
-      secondPhaseDistinctAggInfo_.substitute(smap, analyzer);
-    }
-  }
-
-  /**
-   * Create the info for an aggregation node that merges its pre-aggregated inputs:
-   * - pre-aggregation is computed by 'this'
-   * - tuple desc and smap are the same as that of the input (we're materializing
-   *   the same logical tuple)
-   * - grouping exprs: slotrefs to the input's grouping slots
-   * - aggregate exprs: aggregation of the input's aggregateExprs slots
-   *
-   * The returned AggregateInfo shares its descriptor and smap with the input info;
-   * createAggTupleDesc() must not be called on it.
-   */
-  private void createMergeAggInfo(Analyzer analyzer) {
-    Preconditions.checkState(mergeAggInfo_ == null);
-    TupleDescriptor inputDesc = intermediateTupleDesc_;
-    // construct grouping exprs
-    ArrayList<Expr> groupingExprs = Lists.newArrayList();
-    for (int i = 0; i < getGroupingExprs().size(); ++i) {
-      SlotRef slotRef = new SlotRef(inputDesc.getSlots().get(i));
-      groupingExprs.add(slotRef);
-    }
-
-    // construct agg exprs
-    ArrayList<FunctionCallExpr> aggExprs = Lists.newArrayList();
-    for (int i = 0; i < getAggregateExprs().size(); ++i) {
-      FunctionCallExpr inputExpr = getAggregateExprs().get(i);
-      Preconditions.checkState(inputExpr.isAggregateFunction());
-      Expr aggExprParam =
-          new SlotRef(inputDesc.getSlots().get(i + getGroupingExprs().size()));
-      FunctionCallExpr aggExpr = FunctionCallExpr.createMergeAggCall(
-          inputExpr, Lists.newArrayList(aggExprParam));
-      aggExpr.analyzeNoThrow(analyzer);
-      aggExprs.add(aggExpr);
-    }
-
-    AggPhase aggPhase =
-        (aggPhase_ == AggPhase.FIRST) ? AggPhase.FIRST_MERGE : AggPhase.SECOND_MERGE;
-    mergeAggInfo_ = new AggregateInfo(groupingExprs, aggExprs, aggPhase);
-    mergeAggInfo_.intermediateTupleDesc_ = intermediateTupleDesc_;
-    mergeAggInfo_.outputTupleDesc_ = outputTupleDesc_;
-    mergeAggInfo_.intermediateTupleSmap_ = intermediateTupleSmap_;
-    mergeAggInfo_.outputTupleSmap_ = outputTupleSmap_;
-    mergeAggInfo_.materializedSlots_ = materializedSlots_;
-  }
-
-  /**
-   * Creates an IF function call that returns NULL if any of the slots
-   * at indexes [firstIdx, lastIdx] return NULL.
- * For example, the resulting IF function would look like this for 3 slots:
-   * IF(IsNull(slot1), NULL, IF(IsNull(slot2), NULL, slot3))
-   * Returns null if firstIdx is greater than lastIdx.
-   * Returns a SlotRef to the last slot if there is only one slot in range.
-   */
-  private Expr createCountDistinctAggExprParam(int firstIdx, int lastIdx,
-      ArrayList<SlotDescriptor> slots) {
-    if (firstIdx > lastIdx) return null;
-
-    Expr elseExpr = new SlotRef(slots.get(lastIdx));
-    if (firstIdx == lastIdx) return elseExpr;
-
-    for (int i = lastIdx - 1; i >= firstIdx; --i) {
-      ArrayList<Expr> ifArgs = Lists.newArrayList();
-      SlotRef slotRef = new SlotRef(slots.get(i));
-      // Build expr: IF(IsNull(slotRef), NULL, elseExpr)
-      Expr isNullPred = new IsNullPredicate(slotRef, false);
-      ifArgs.add(isNullPred);
-      ifArgs.add(new NullLiteral());
-      ifArgs.add(elseExpr);
-      elseExpr = new FunctionCallExpr("if", ifArgs);
-    }
-    return elseExpr;
-  }
-
-  /**
-   * Create the info for an aggregation node that computes the second phase of
-   * DISTINCT aggregate functions.
-   * (Refer to createDistinctAggInfo() for an explanation of the phases.)
-   * - 'this' is the phase 1 aggregation
-   * - grouping exprs are those of the original query (param origGroupingExprs)
-   * - aggregate exprs for the DISTINCT agg fns: these are aggregating the grouping
-   *   slots that were added to the original grouping slots in phase 1;
-   *   count is mapped to count(*) and sum is mapped to sum
-   * - other aggregate exprs: same as the non-DISTINCT merge case
-   *   (count is mapped to sum, everything else stays the same)
-   *
-   * This call also creates the tuple descriptor and smap for the returned AggregateInfo.
-   */
-  private void createSecondPhaseAggInfo(
-      ArrayList<Expr> origGroupingExprs,
-      ArrayList<FunctionCallExpr> distinctAggExprs, Analyzer analyzer)
-      throws AnalysisException {
-    Preconditions.checkState(secondPhaseDistinctAggInfo_ == null);
-    Preconditions.checkState(!distinctAggExprs.isEmpty());
-    // The output of the 1st phase agg is the 1st phase intermediate.
-    TupleDescriptor inputDesc = intermediateTupleDesc_;
-
-    // construct agg exprs for original DISTINCT aggregate functions
-    // (these aren't part of aggExprs_)
-    ArrayList<FunctionCallExpr> secondPhaseAggExprs = Lists.newArrayList();
-    for (FunctionCallExpr inputExpr: distinctAggExprs) {
-      Preconditions.checkState(inputExpr.isAggregateFunction());
-      FunctionCallExpr aggExpr = null;
-      if (inputExpr.getFnName().getFunction().equals("count")) {
-        // COUNT(DISTINCT ...) ->
-        // COUNT(IF(IsNull(<agg slot 1>), NULL, IF(IsNull(<agg slot 2>), NULL, ...)))
-        // We need the nested IF to make sure that we do not count
-        // column-value combinations if any of the distinct columns are NULL.
-        // This behavior is consistent with MySQL.
-        Expr ifExpr = createCountDistinctAggExprParam(origGroupingExprs.size(),
-            origGroupingExprs.size() + inputExpr.getChildren().size() - 1,
-            inputDesc.getSlots());
-        Preconditions.checkNotNull(ifExpr);
-        ifExpr.analyzeNoThrow(analyzer);
-        aggExpr = new FunctionCallExpr("count", Lists.newArrayList(ifExpr));
-      } else if (inputExpr.getFnName().getFunction().equals("group_concat")) {
-        // Syntax: GROUP_CONCAT([DISTINCT] expression [, separator])
-        ArrayList<Expr> exprList = Lists.newArrayList();
-        // Add "expression" parameter. Need to get it from the inputDesc's slots so the
-        // tuple reference is correct.
-        exprList.add(new SlotRef(inputDesc.getSlots().get(origGroupingExprs.size())));
-        // Check if user provided a custom separator
-        if (inputExpr.getChildren().size() == 2) exprList.add(inputExpr.getChild(1));
-        aggExpr = new FunctionCallExpr(inputExpr.getFnName(), exprList);
-      } else {
-        // SUM(DISTINCT <expr>) -> SUM(<last grouping slot>);
-        // (MIN(DISTINCT ...) and MAX(DISTINCT ...) have their DISTINCT turned
-        // off during analysis, and AVG() is changed to SUM()/COUNT())
-        Expr aggExprParam =
-            new SlotRef(inputDesc.getSlots().get(origGroupingExprs.size()));
-        aggExpr = new FunctionCallExpr(inputExpr.getFnName(),
-            Lists.newArrayList(aggExprParam));
-      }
-      secondPhaseAggExprs.add(aggExpr);
-    }
-
-    // map all the remaining agg fns
-    for (int i = 0; i < aggregateExprs_.size(); ++i) {
-      FunctionCallExpr inputExpr = aggregateExprs_.get(i);
-      Preconditions.checkState(inputExpr.isAggregateFunction());
-      // we're aggregating an intermediate slot of the 1st agg phase
-      Expr aggExprParam =
-          new SlotRef(inputDesc.getSlots().get(i + getGroupingExprs().size()));
-      FunctionCallExpr aggExpr = FunctionCallExpr.createMergeAggCall(
-          inputExpr, Lists.newArrayList(aggExprParam));
-      secondPhaseAggExprs.add(aggExpr);
-    }
-    Preconditions.checkState(
-        secondPhaseAggExprs.size() == aggregateExprs_.size() + distinctAggExprs.size());
-
-    for (FunctionCallExpr aggExpr: secondPhaseAggExprs) {
-      aggExpr.analyzeNoThrow(analyzer);
-      Preconditions.checkState(aggExpr.isAggregateFunction());
-    }
-
-    ArrayList<Expr> substGroupingExprs =
-        Expr.substituteList(origGroupingExprs, intermediateTupleSmap_, analyzer, false);
-    secondPhaseDistinctAggInfo_ =
-        new AggregateInfo(substGroupingExprs, secondPhaseAggExprs, AggPhase.SECOND);
-    secondPhaseDistinctAggInfo_.createTupleDescs(analyzer);
-    secondPhaseDistinctAggInfo_.createSecondPhaseAggSMap(this, distinctAggExprs);
-    secondPhaseDistinctAggInfo_.createMergeAggInfo(analyzer);
-  }
-
-  /**
-   * Create smap to map original grouping and aggregate exprs onto output
-   * of secondPhaseDistinctAggInfo.
-   */
-  private void createSecondPhaseAggSMap(
-      AggregateInfo inputAggInfo, ArrayList<FunctionCallExpr> distinctAggExprs) {
-    outputTupleSmap_.clear();
-    int slotIdx = 0;
-    ArrayList<SlotDescriptor> slotDescs = outputTupleDesc_.getSlots();
-
-    int numDistinctParams = distinctAggExprs.get(0).getChildren().size();
-    // If we are counting distinct params of group_concat, we cannot include the custom
-    // separator since it is not a distinct param.
-    if (distinctAggExprs.get(0).getFnName().getFunction().equalsIgnoreCase(
-        "group_concat")
-        && numDistinctParams == 2) {
-      --numDistinctParams;
-    }
-    int numOrigGroupingExprs =
-        inputAggInfo.getGroupingExprs().size() - numDistinctParams;
-    Preconditions.checkState(slotDescs.size() ==
-        numOrigGroupingExprs + distinctAggExprs.size() +
-        inputAggInfo.getAggregateExprs().size());
-
-    // original grouping exprs -> first m slots
-    for (int i = 0; i < numOrigGroupingExprs; ++i, ++slotIdx) {
-      Expr groupingExpr = inputAggInfo.getGroupingExprs().get(i);
-      outputTupleSmap_.put(
-          groupingExpr.clone(), new SlotRef(slotDescs.get(slotIdx)));
-    }
-
-    // distinct agg exprs -> next n slots
-    for (int i = 0; i < distinctAggExprs.size(); ++i, ++slotIdx) {
-      Expr aggExpr = distinctAggExprs.get(i);
-      outputTupleSmap_.put(
-          aggExpr.clone(), (new SlotRef(slotDescs.get(slotIdx))));
-    }
-
-    // remaining agg exprs -> remaining slots
-    for (int i = 0; i < inputAggInfo.getAggregateExprs().size(); ++i, ++slotIdx) {
-      Expr aggExpr = inputAggInfo.getAggregateExprs().get(i);
-      outputTupleSmap_.put(aggExpr.clone(), new SlotRef(slotDescs.get(slotIdx)));
-    }
-  }
-
-  /**
-   * Populates the output and intermediate smaps based on the output and intermediate
-   * tuples that are assumed to be set. If an intermediate tuple is required, also
-   * populates the output-to-intermediate smap and registers auxiliary equivalence
-   * predicates between the grouping slots of the two tuples.
-   */
-  public void createSmaps(Analyzer analyzer) {
-    Preconditions.checkNotNull(outputTupleDesc_);
-    Preconditions.checkNotNull(intermediateTupleDesc_);
-
-    List<Expr> exprs = Lists.newArrayListWithCapacity(
-        groupingExprs_.size() + aggregateExprs_.size());
-    exprs.addAll(groupingExprs_);
-    exprs.addAll(aggregateExprs_);
-    for (int i = 0; i < exprs.size(); ++i) {
-      outputTupleSmap_.put(exprs.get(i).clone(),
-          new SlotRef(outputTupleDesc_.getSlots().get(i)));
-      if (!requiresIntermediateTuple()) continue;
-      intermediateTupleSmap_.put(exprs.get(i).clone(),
-          new SlotRef(intermediateTupleDesc_.getSlots().get(i)));
-      outputToIntermediateTupleSmap_.put(
-          new SlotRef(outputTupleDesc_.getSlots().get(i)),
-          new SlotRef(intermediateTupleDesc_.getSlots().get(i)));
-      if (i < groupingExprs_.size()) {
-        analyzer.createAuxEquivPredicate(
-            new SlotRef(outputTupleDesc_.getSlots().get(i)),
-            new SlotRef(intermediateTupleDesc_.getSlots().get(i)));
-      }
-    }
-    if (!requiresIntermediateTuple()) intermediateTupleSmap_ = outputTupleSmap_;
-
-    LOG.trace("output smap=" + outputTupleSmap_.debugString());
-    LOG.trace("intermediate smap=" + intermediateTupleSmap_.debugString());
-  }
-
-  /**
-   * Mark slots required for this aggregation as materialized:
-   * - all grouping output slots as well as grouping exprs
-   * - for non-distinct aggregation: the aggregate exprs of materialized aggregate slots;
-   *   this assumes that the output slots corresponding to aggregate exprs have already
-   *   been marked by the consumer of this select block
-   * - for distinct aggregation, we mark all aggregate output slots in order to keep
-   *   things simple
-   * Also computes materializedAggregateExprs.
- * This call must be idempotent because it may be called more than once for UNION stmts.
-   */
-  @Override
-  public void materializeRequiredSlots(Analyzer analyzer, ExprSubstitutionMap smap) {
-    for (int i = 0; i < groupingExprs_.size(); ++i) {
-      outputTupleDesc_.getSlots().get(i).setIsMaterialized(true);
-      intermediateTupleDesc_.getSlots().get(i).setIsMaterialized(true);
-    }
-
-    // collect input exprs: grouping exprs plus aggregate exprs that need to be
-    // materialized
-    materializedSlots_.clear();
-    List<Expr> exprs = Lists.newArrayList();
-    exprs.addAll(groupingExprs_);
-    for (int i = 0; i < aggregateExprs_.size(); ++i) {
-      SlotDescriptor slotDesc =
-          outputTupleDesc_.getSlots().get(groupingExprs_.size() + i);
-      SlotDescriptor intermediateSlotDesc =
-          intermediateTupleDesc_.getSlots().get(groupingExprs_.size() + i);
-      if (isDistinctAgg()) {
-        slotDesc.setIsMaterialized(true);
-        intermediateSlotDesc.setIsMaterialized(true);
-      }
-      if (!slotDesc.isMaterialized()) continue;
-      intermediateSlotDesc.setIsMaterialized(true);
-      exprs.add(aggregateExprs_.get(i));
-      materializedSlots_.add(i);
-    }
-    List<Expr> resolvedExprs = Expr.substituteList(exprs, smap, analyzer, false);
-    analyzer.materializeSlots(resolvedExprs);
-
-    if (isDistinctAgg()) {
-      secondPhaseDistinctAggInfo_.materializeRequiredSlots(analyzer, null);
-    }
-  }
-
-  /**
-   * Checks if all materialized aggregate expressions have distinct semantics.
-   * It returns true if either of the following is true:
-   * (1) all materialized aggregate expressions have distinct semantics
-   *     (e.g. MIN, MAX, NDV). In other words, this optimization will work
-   *     for COUNT(DISTINCT c) but not COUNT(c).
-   * (2) there are no aggregate expressions but only grouping expressions.
-   */
-  public boolean hasAllDistinctAgg() {
-    if (hasAggregateExprs()) {
-      for (FunctionCallExpr aggExpr : getMaterializedAggregateExprs()) {
-        if (!aggExpr.isDistinct() && !aggExpr.ignoresDistinct()) return false;
-      }
-    } else {
-      Preconditions.checkState(!groupingExprs_.isEmpty());
-    }
-    return true;
-  }
-
-  /**
-   * Validates the internal state of this agg info: Checks that the number of
-   * materialized slots of the output tuple corresponds to the number of materialized
-   * aggregate functions plus the number of grouping exprs. Also checks that the return
-   * types of the aggregate and grouping exprs correspond to the slots in the output
-   * tuple.
-   */
-  public void checkConsistency() {
-    ArrayList<SlotDescriptor> slots = outputTupleDesc_.getSlots();
-
-    // Check materialized slots.
-    int numMaterializedSlots = 0;
-    for (SlotDescriptor slotDesc: slots) {
-      if (slotDesc.isMaterialized()) ++numMaterializedSlots;
-    }
-    Preconditions.checkState(numMaterializedSlots ==
-        materializedSlots_.size() + groupingExprs_.size());
-
-    // Check that grouping expr return types match the slot descriptors.
-    int slotIdx = 0;
-    for (int i = 0; i < groupingExprs_.size(); ++i) {
-      Expr groupingExpr = groupingExprs_.get(i);
-      Type slotType = slots.get(slotIdx).getType();
-      Preconditions.checkState(groupingExpr.getType().equals(slotType),
-          String.format("Grouping expr %s returns type %s but its output tuple " +
-              "slot has type %s", groupingExpr.toSql(),
-              groupingExpr.getType().toString(), slotType.toString()));
-      ++slotIdx;
-    }
-    // Check that aggregate expr return types match the slot descriptors.
-    for (int i = 0; i < aggregateExprs_.size(); ++i) {
-      Expr aggExpr = aggregateExprs_.get(i);
-      Type slotType = slots.get(slotIdx).getType();
-      Preconditions.checkState(aggExpr.getType().equals(slotType),
-          String.format("Agg expr %s returns type %s but its output tuple " +
-              "slot has type %s", aggExpr.toSql(), aggExpr.getType().toString(),
-              slotType.toString()));
-      ++slotIdx;
-    }
-  }
-
-  /**
-   * Returns DataPartition derived from grouping exprs.
-   * Returns unpartitioned spec if no grouping.
-   * TODO: this won't work when we start supporting range partitions,
-   * because we could derive both hash and order-based partitions
-   */
-  public DataPartition getPartition() {
-    if (groupingExprs_.isEmpty()) {
-      return DataPartition.UNPARTITIONED;
-    } else {
-      return DataPartition.hashPartitioned(groupingExprs_);
-    }
-  }
-
-  @Override
-  public String debugString() {
-    StringBuilder out = new StringBuilder(super.debugString());
-    out.append(Objects.toStringHelper(this)
-        .add("phase", aggPhase_)
-        .add("intermediate_smap", intermediateTupleSmap_.debugString())
-        .add("output_smap", outputTupleSmap_.debugString())
-        .toString());
-    if (mergeAggInfo_ != this && mergeAggInfo_ != null) {
-      out.append("\nmergeAggInfo:\n" + mergeAggInfo_.debugString());
-    }
-    if (secondPhaseDistinctAggInfo_ != null) {
-      out.append("\nsecondPhaseDistinctAggInfo:\n"
-          + secondPhaseDistinctAggInfo_.debugString());
-    }
-    return out.toString();
-  }
-
-  @Override
-  protected String tupleDebugName() { return "agg-tuple"; }
-
-  @Override
-  public AggregateInfo clone() { return new AggregateInfo(this); }
-}

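The nested-IF construction documented at createCountDistinctAggExprParam() above is easier to see with plain strings in place of Impala's Expr tree. A minimal sketch (hypothetical class and helper names) of the same backwards-walking loop:

// Builds IF(IsNull(slot1), NULL, IF(IsNull(slot2), NULL, slot3)) for the
// non-degenerate case (more than one slot), walking from the last slot backwards.
import java.util.Arrays;
import java.util.List;

public class CountDistinctIfDemo {
  static String buildIf(List<String> slots) {
    String elseExpr = slots.get(slots.size() - 1);
    for (int i = slots.size() - 2; i >= 0; --i) {
      elseExpr = "if(isnull(" + slots.get(i) + "), NULL, " + elseExpr + ")";
    }
    return elseExpr;
  }

  public static void main(String[] args) {
    // Prints: if(isnull(slot1), NULL, if(isnull(slot2), NULL, slot3))
    System.out.println(buildIf(Arrays.asList("slot1", "slot2", "slot3")));
  }
}

This mirrors why COUNT(DISTINCT a, b, c) only counts rows where none of the distinct columns is NULL, consistent with MySQL as noted in createSecondPhaseAggInfo().
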
http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/analysis/AggregateInfoBase.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/analysis/AggregateInfoBase.java b/fe/src/main/java/com/cloudera/impala/analysis/AggregateInfoBase.java
deleted file mode 100644
index f3ad3f8..0000000
--- a/fe/src/main/java/com/cloudera/impala/analysis/AggregateInfoBase.java
+++ /dev/null
@@ -1,221 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.analysis;
-
-import java.util.ArrayList;
-import java.util.List;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.cloudera.impala.catalog.AggregateFunction;
-import com.cloudera.impala.catalog.ColumnStats;
-import com.cloudera.impala.catalog.Type;
-import com.google.common.base.Objects;
-import com.google.common.base.Preconditions;
-import com.google.common.collect.Lists;
-
-/**
- * Base class for AggregateInfo and AnalyticInfo containing the intermediate and output
- * tuple descriptors as well as their smaps for evaluating aggregate functions.
- */
-public abstract class AggregateInfoBase {
-  private final static Logger LOG =
-      LoggerFactory.getLogger(AggregateInfoBase.class);
-
-  // For aggregations: All unique grouping expressions from a select block.
-  // For analytics: Empty.
-  protected ArrayList<Expr> groupingExprs_;
-
-  // For aggregations: All unique aggregate expressions from a select block.
-  // For analytics: The results of AnalyticExpr.getFnCall() for the unique
-  // AnalyticExprs of a select block.
-  protected ArrayList<FunctionCallExpr> aggregateExprs_;
-
-  // The tuple into which the intermediate output of an aggregation is materialized.
- // Contains groupingExprs.size() + aggregateExprs.size() slots, the first
- // groupingExprs.size() of which hold the values of the grouping exprs, followed by
- // slots into which the aggregateExprs' update()/merge() symbols materialize their
- // output, i.e., slots of the aggregate functions' intermediate types.
-  // Identical to outputTupleDesc_ if no aggregateExpr has an output type that is
-  // different from its intermediate type.
-  protected TupleDescriptor intermediateTupleDesc_;
-
-  // The tuple into which the final output of the aggregation is materialized.
- // Contains groupingExprs.size() + aggregateExprs.size() slots, the first
- // groupingExprs.size() of which hold the values of the grouping exprs, followed by
- // slots into which the aggregateExprs' finalize() symbols write their results, i.e.,
- // slots of the aggregate functions' output types.
-  protected TupleDescriptor outputTupleDesc_;
-
- // For aggregation: indices into the aggregate exprs that need to be materialized.
-  // For analytics: indices into the analytic exprs and their corresponding aggregate
-  // exprs that need to be materialized.
-  // Populated in materializeRequiredSlots() which must be implemented by subclasses.
-  protected ArrayList<Integer> materializedSlots_ = Lists.newArrayList();
-
-  protected AggregateInfoBase(ArrayList<Expr> groupingExprs,
-      ArrayList<FunctionCallExpr> aggExprs)  {
-    Preconditions.checkState(groupingExprs != null || aggExprs != null);
-    groupingExprs_ =
-        groupingExprs != null ? Expr.cloneList(groupingExprs) : new ArrayList<Expr>();
-    Preconditions.checkState(aggExprs != null || !(this instanceof AnalyticInfo));
-    aggregateExprs_ =
-        aggExprs != null ? Expr.cloneList(aggExprs) : new ArrayList<FunctionCallExpr>();
-  }
-
-  /**
-   * C'tor for cloning.
-   */
-  protected AggregateInfoBase(AggregateInfoBase other) {
-    groupingExprs_ =
-        (other.groupingExprs_ != null) ? Expr.cloneList(other.groupingExprs_) : null;
-    aggregateExprs_ =
-        (other.aggregateExprs_ != null) ? Expr.cloneList(other.aggregateExprs_) : null;
-    intermediateTupleDesc_ = other.intermediateTupleDesc_;
-    outputTupleDesc_ = other.outputTupleDesc_;
-    materializedSlots_ = Lists.newArrayList(other.materializedSlots_);
-  }
-
-  /**
-   * Creates the intermediate and output tuple descriptors. If no agg expr has an
-   * intermediate type different from its output type, then only the output tuple
-   * descriptor is created and the intermediate tuple is set to the output tuple.
-   */
-  protected void createTupleDescs(Analyzer analyzer) {
-    // Create the intermediate tuple desc first, so that the tuple ids are increasing
-    // from bottom to top in the plan tree.
-    intermediateTupleDesc_ = createTupleDesc(analyzer, false);
-    if (requiresIntermediateTuple(aggregateExprs_)) {
-      outputTupleDesc_ = createTupleDesc(analyzer, true);
-    } else {
-      outputTupleDesc_ = intermediateTupleDesc_;
-    }
-  }
-
-  /**
-   * Returns a tuple descriptor for the aggregation/analytic's intermediate or final
-   * result, depending on whether isOutputTuple is true or false.
-   * Also updates the appropriate substitution map, and creates and registers auxiliary
-   * equality predicates between the grouping slots and the grouping exprs.
-   */
-  private TupleDescriptor createTupleDesc(Analyzer analyzer, boolean isOutputTuple) {
-    TupleDescriptor result =
-        analyzer.getDescTbl().createTupleDescriptor(
-          tupleDebugName() + (isOutputTuple ? "-out" : "-intermed"));
-    List<Expr> exprs = Lists.newArrayListWithCapacity(
-        groupingExprs_.size() + aggregateExprs_.size());
-    exprs.addAll(groupingExprs_);
-    exprs.addAll(aggregateExprs_);
-
-    int aggregateExprStartIndex = groupingExprs_.size();
-    for (int i = 0; i < exprs.size(); ++i) {
-      Expr expr = exprs.get(i);
-      SlotDescriptor slotDesc = analyzer.addSlotDescriptor(result);
-      slotDesc.initFromExpr(expr);
-      if (i < aggregateExprStartIndex) {
-        // register equivalence between grouping slot and grouping expr;
-        // do this only when the grouping expr isn't a constant, otherwise
-        // it'll simply show up as a gratuitous HAVING predicate
-        // (which would actually be incorrect if the constant happens to be NULL)
-        if (!expr.isConstant()) {
-          analyzer.createAuxEquivPredicate(new SlotRef(slotDesc), expr.clone());
-        }
-      } else {
-        Preconditions.checkArgument(expr instanceof FunctionCallExpr);
-        FunctionCallExpr aggExpr = (FunctionCallExpr)expr;
-        if (aggExpr.isMergeAggFn()) {
-          slotDesc.setLabel(aggExpr.getChild(0).toSql());
-          slotDesc.setSourceExpr(aggExpr.getChild(0));
-        } else {
-          slotDesc.setLabel(aggExpr.toSql());
-          slotDesc.setSourceExpr(aggExpr);
-        }
-
-        // count(*) is non-nullable.
-        if (aggExpr.getFnName().getFunction().equals("count")) {
-          // TODO: Consider making nullability a property of types or of builtin agg fns.
-          // row_number, rank, and dense_rank are non-nullable as well.
-          slotDesc.setIsNullable(false);
-        }
-        if (!isOutputTuple) {
-          Type intermediateType = ((AggregateFunction)aggExpr.fn_).getIntermediateType();
-          if (intermediateType != null) {
-            // Use the output type as intermediate if the function has a wildcard decimal.
-            if (!intermediateType.isWildcardDecimal()) {
-              slotDesc.setType(intermediateType);
-            } else {
-              Preconditions.checkState(expr.getType().isDecimal());
-            }
-          }
-        }
-      }
-    }
-    String prefix = (isOutputTuple ? "result" : "intermediate");
-    LOG.trace(prefix + " tuple=" + result.debugString());
-    return result;
-  }
-
-  /**
-   * Marks the slots required for evaluating an Analytic/AggregateInfo by
-   * resolving the materialized aggregate/analytic exprs against smap,
-   * and then marking their slots.
-   */
-  public abstract void materializeRequiredSlots(Analyzer analyzer,
-      ExprSubstitutionMap smap);
-
-  public ArrayList<Expr> getGroupingExprs() { return groupingExprs_; }
-  public ArrayList<FunctionCallExpr> getAggregateExprs() { return aggregateExprs_; }
-  public TupleDescriptor getOutputTupleDesc() { return outputTupleDesc_; }
-  public TupleDescriptor getIntermediateTupleDesc() { return intermediateTupleDesc_; }
-  public TupleId getIntermediateTupleId() { return intermediateTupleDesc_.getId(); }
-  public TupleId getOutputTupleId() { return outputTupleDesc_.getId(); }
-  public boolean requiresIntermediateTuple() {
-    Preconditions.checkNotNull(intermediateTupleDesc_);
-    Preconditions.checkNotNull(outputTupleDesc_);
-    return intermediateTupleDesc_ != outputTupleDesc_;
-  }
-
-  /**
-   * Returns true if evaluating the given aggregate exprs requires an intermediate tuple,
-   * i.e., whether one of the aggregate functions has an intermediate type different from
-   * its output type.
-   */
-  public static <T extends Expr> boolean requiresIntermediateTuple(List<T> aggExprs) {
-    for (Expr aggExpr: aggExprs) {
-      Type intermediateType = ((AggregateFunction) aggExpr.fn_).getIntermediateType();
-      if (intermediateType != null) return true;
-    }
-    return false;
-  }
-
-  public String debugString() {
-    StringBuilder out = new StringBuilder();
-    out.append(Objects.toStringHelper(this)
-        .add("grouping_exprs", Expr.debugString(groupingExprs_))
-        .add("aggregate_exprs", Expr.debugString(aggregateExprs_))
-        .add("intermediate_tuple", (intermediateTupleDesc_ == null)
-            ? "null" : intermediateTupleDesc_.debugString())
-        .add("output_tuple", (outputTupleDesc_ == null)
-            ? "null" : outputTupleDesc_.debugString())
-        .toString());
-    return out.toString();
-  }
-
-  protected abstract String tupleDebugName();
-}

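A note for readers skimming the deleted createTupleDesc() above: it hinges on one property, namely that an aggregate function only forces a separate intermediate tuple when its intermediate type differs from its output type (getIntermediateType() returns null otherwise), which is exactly what the static requiresIntermediateTuple() tests. Below is a minimal, self-contained sketch of that check; AggFn and the avg intermediate struct are simplified stand-ins chosen for illustration, not the real catalog classes.

import java.util.Arrays;
import java.util.List;

public class IntermediateTupleCheck {
  // Simplified stand-in for AggregateFunction: getIntermediateType() is null
  // when the intermediate representation equals the output type.
  static final class AggFn {
    final String name;
    final String intermediateType;  // null => same as the output type
    AggFn(String name, String intermediateType) {
      this.name = name;
      this.intermediateType = intermediateType;
    }
  }

  // Mirrors requiresIntermediateTuple(): true as soon as any function needs a
  // distinct intermediate representation.
  static boolean requiresIntermediateTuple(List<AggFn> fns) {
    for (AggFn fn : fns) {
      if (fn.intermediateType != null) return true;
    }
    return false;
  }

  public static void main(String[] args) {
    // count accumulates directly into its output slot; avg is assumed here to
    // carry a (sum, count) pair before the final division, which forces an
    // intermediate tuple for the whole aggregation.
    List<AggFn> fns = Arrays.asList(
        new AggFn("count", null),
        new AggFn("avg", "STRUCT<sum:DOUBLE,count:BIGINT>"));
    System.out.println(requiresIntermediateTuple(fns));  // true
  }
}

In the planner this answer decides whether intermediateTupleDesc_ and outputTupleDesc_ end up as the same descriptor, which is what requiresIntermediateTuple() on the instance reports.
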
http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/analysis/AlterTableAddPartitionStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/analysis/AlterTableAddPartitionStmt.java b/fe/src/main/java/com/cloudera/impala/analysis/AlterTableAddPartitionStmt.java
deleted file mode 100644
index a5cb2ca..0000000
--- a/fe/src/main/java/com/cloudera/impala/analysis/AlterTableAddPartitionStmt.java
+++ /dev/null
@@ -1,117 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.analysis;
-
-import com.cloudera.impala.authorization.Privilege;
-import com.cloudera.impala.catalog.HdfsTable;
-import com.cloudera.impala.catalog.Table;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.common.FileSystemUtil;
-import com.cloudera.impala.thrift.TAlterTableAddPartitionParams;
-import com.cloudera.impala.thrift.TAlterTableParams;
-import com.cloudera.impala.thrift.TAlterTableType;
-import com.google.common.base.Preconditions;
-import org.apache.hadoop.fs.permission.FsAction;
-
-/**
- * Represents an ALTER TABLE ADD PARTITION statement.
- */
-public class AlterTableAddPartitionStmt extends AlterTableStmt {
-  private final HdfsUri location_;
-  private final boolean ifNotExists_;
-  private final PartitionSpec partitionSpec_;
-  private final HdfsCachingOp cacheOp_;
-
-  public AlterTableAddPartitionStmt(TableName tableName,
-      PartitionSpec partitionSpec, HdfsUri location, boolean ifNotExists,
-      HdfsCachingOp cacheOp) {
-    super(tableName);
-    Preconditions.checkState(partitionSpec != null);
-    location_ = location;
-    ifNotExists_ = ifNotExists;
-    partitionSpec_ = partitionSpec;
-    partitionSpec_.setTableName(tableName);
-    cacheOp_ = cacheOp;
-  }
-
-  public boolean getIfNotExists() { return ifNotExists_; }
-  public HdfsUri getLocation() { return location_; }
-
-  @Override
-  public String toSql() {
-    StringBuilder sb = new StringBuilder("ALTER TABLE " + getTbl());
-    sb.append(" ADD ");
-    if (ifNotExists_) {
-      sb.append("IF NOT EXISTS ");
-    }
-    sb.append(partitionSpec_.toSql());
-    if (location_ != null) {
-      sb.append(String.format(" LOCATION '%s'", location_));
-    }
-    if (cacheOp_ != null) sb.append(cacheOp_.toSql());
-    return sb.toString();
-  }
-
-  @Override
-  public TAlterTableParams toThrift() {
-    TAlterTableParams params = super.toThrift();
-    params.setAlter_type(TAlterTableType.ADD_PARTITION);
-    TAlterTableAddPartitionParams addPartParams = new TAlterTableAddPartitionParams();
-    addPartParams.setPartition_spec(partitionSpec_.toThrift());
-    addPartParams.setLocation(location_ == null ? null : location_.toString());
-    addPartParams.setIf_not_exists(ifNotExists_);
-    if (cacheOp_ != null) addPartParams.setCache_op(cacheOp_.toThrift());
-    params.setAdd_partition_params(addPartParams);
-    return params;
-  }
-
-  @Override
-  public void analyze(Analyzer analyzer) throws AnalysisException {
-    super.analyze(analyzer);
-    if (!ifNotExists_) partitionSpec_.setPartitionShouldNotExist();
-    partitionSpec_.setPrivilegeRequirement(Privilege.ALTER);
-    partitionSpec_.analyze(analyzer);
-
-    if (location_ != null) {
-      location_.analyze(analyzer, Privilege.ALL, FsAction.READ_WRITE);
-    }
-
-    boolean shouldCache = false;
-    Table table = getTargetTable();
-    if (cacheOp_ != null) {
-      cacheOp_.analyze(analyzer);
-      shouldCache = cacheOp_.shouldCache();
-    } else if (table instanceof HdfsTable) {
-      shouldCache = ((HdfsTable)table).isMarkedCached();
-    }
-    if (shouldCache) {
-      if (!(table instanceof HdfsTable)) {
-        throw new AnalysisException("Caching must target a HDFS table: " +
-            table.getFullName());
-      }
-      HdfsTable hdfsTable = (HdfsTable)table;
-      if ((location_ != null && !FileSystemUtil.isPathCacheable(location_.getPath())) ||
-          (location_ == null && !hdfsTable.isLocationCacheable())) {
-        throw new AnalysisException(String.format("Location '%s' cannot be cached. " +
-            "Please retry without caching: ALTER TABLE %s ADD PARTITION ... UNCACHED",
-            (location_ != null) ? location_.toString() : hdfsTable.getLocation(),
-            table.getFullName()));
-      }
-    }
-  }
-}

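As a quick illustration of the toSql() assembly in the deleted AlterTableAddPartitionStmt, the standalone sketch below reproduces the clause order. The table name, partition spec, and location are hard-coded placeholders; the real method delegates to PartitionSpec.toSql() and HdfsCachingOp.toSql().

public class AddPartitionToSqlSketch {
  // Follows the StringBuilder pattern of AlterTableAddPartitionStmt.toSql().
  static String toSql(String tbl, String partitionSpec, String location,
      boolean ifNotExists) {
    StringBuilder sb = new StringBuilder("ALTER TABLE " + tbl);
    sb.append(" ADD ");
    if (ifNotExists) sb.append("IF NOT EXISTS ");
    sb.append(partitionSpec);
    if (location != null) sb.append(String.format(" LOCATION '%s'", location));
    return sb.toString();
  }

  public static void main(String[] args) {
    System.out.println(toSql("t", "PARTITION (year=2016)",
        "/warehouse/t/year=2016", true));
    // -> ALTER TABLE t ADD IF NOT EXISTS PARTITION (year=2016)
    //    LOCATION '/warehouse/t/year=2016'
  }
}
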
http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/analysis/AlterTableAddReplaceColsStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/analysis/AlterTableAddReplaceColsStmt.java b/fe/src/main/java/com/cloudera/impala/analysis/AlterTableAddReplaceColsStmt.java
deleted file mode 100644
index aaa223a..0000000
--- a/fe/src/main/java/com/cloudera/impala/analysis/AlterTableAddReplaceColsStmt.java
+++ /dev/null
@@ -1,108 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.analysis;
-
-import java.util.List;
-import java.util.Set;
-
-import org.apache.hadoop.hive.metastore.api.FieldSchema;
-
-import com.cloudera.impala.catalog.Column;
-import com.cloudera.impala.catalog.HBaseTable;
-import com.cloudera.impala.catalog.Table;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.thrift.TAlterTableAddReplaceColsParams;
-import com.cloudera.impala.thrift.TAlterTableParams;
-import com.cloudera.impala.thrift.TAlterTableType;
-import com.google.common.base.Preconditions;
-import com.google.common.collect.Lists;
-import com.google.common.collect.Sets;
-
-/**
- * Represents an ALTER TABLE ADD|REPLACE COLUMNS (colDef1, colDef2, ...) statement.
- */
-public class AlterTableAddReplaceColsStmt extends AlterTableStmt {
-  private final List<ColumnDef> columnDefs_;
-  private final boolean replaceExistingCols_;
-
-  public AlterTableAddReplaceColsStmt(TableName tableName, List<ColumnDef> columnDefs,
-      boolean replaceExistingCols) {
-    super(tableName);
-    Preconditions.checkState(columnDefs != null && columnDefs.size() > 0);
-    columnDefs_ = Lists.newArrayList(columnDefs);
-    replaceExistingCols_ = replaceExistingCols;
-  }
-
-  public List<ColumnDef> getColumnDescs() { return columnDefs_; }
-
-  // Returns true if the new columns replace the existing columns.
-  public boolean getReplaceExistingCols() {
-    return replaceExistingCols_;
-  }
-
-  @Override
-  public TAlterTableParams toThrift() {
-    TAlterTableParams params = super.toThrift();
-    params.setAlter_type(TAlterTableType.ADD_REPLACE_COLUMNS);
-    TAlterTableAddReplaceColsParams colParams = new TAlterTableAddReplaceColsParams();
-    for (ColumnDef col: getColumnDescs()) {
-      colParams.addToColumns(col.toThrift());
-    }
-    colParams.setReplace_existing_cols(replaceExistingCols_);
-    params.setAdd_replace_cols_params(colParams);
-    return params;
-  }
-
-  @Override
-  public void analyze(Analyzer analyzer) throws AnalysisException {
-    super.analyze(analyzer);
-    Table t = getTargetTable();
-    // TODO: Support column-level DDL on HBase tables. Requires updating the column
-    // mappings along with the table columns.
-    if (t instanceof HBaseTable) {
-      throw new AnalysisException("ALTER TABLE ADD|REPLACE COLUMNS not currently " +
-          "supported on HBase tables.");
-    }
-
-    // Build a set of the partition keys for the table.
-    Set<String> existingPartitionKeys = Sets.newHashSet();
-    for (FieldSchema fs: t.getMetaStoreTable().getPartitionKeys()) {
-      existingPartitionKeys.add(fs.getName().toLowerCase());
-    }
-
-    // Make sure the new columns don't already exist in the table, that the names
-    // are all valid and unique, and that none of the columns conflict with
-    // partition columns.
-    Set<String> colNames = Sets.newHashSet();
-    for (ColumnDef c: columnDefs_) {
-      c.analyze();
-      String colName = c.getColName().toLowerCase();
-      if (existingPartitionKeys.contains(colName)) {
-        throw new AnalysisException(
-            "Column name conflicts with existing partition column: " + colName);
-      }
-
-      Column col = t.getColumn(colName);
-      if (col != null && !replaceExistingCols_) {
-        throw new AnalysisException("Column already exists: " + colName);
-      } else if (!colNames.add(colName)) {
-        throw new AnalysisException("Duplicate column name: " + colName);
-      }
-    }
-  }
-}

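The analyze() method just removed is, at its core, three set-membership checks over lower-cased names. A compact standalone sketch of the same validation follows; the names are placeholders, and the exception type is a stand-in for the real AnalysisException.

import java.util.Arrays;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

public class AddColsValidationSketch {
  // Rejects the same three cases as analyze(): a clash with a partition
  // column, a clash with an existing column when not replacing, and a
  // duplicate within the new column list itself.
  static void validate(List<String> newCols, Set<String> existingCols,
      Set<String> partitionKeys, boolean replaceExistingCols) {
    Set<String> seen = new HashSet<>();
    for (String col : newCols) {
      String name = col.toLowerCase();
      if (partitionKeys.contains(name)) {
        throw new IllegalArgumentException(
            "Column name conflicts with existing partition column: " + name);
      }
      if (!replaceExistingCols && existingCols.contains(name)) {
        throw new IllegalArgumentException("Column already exists: " + name);
      }
      if (!seen.add(name)) {
        throw new IllegalArgumentException("Duplicate column name: " + name);
      }
    }
  }

  public static void main(String[] args) {
    // Throws "Duplicate column name: c1" because the check is case-insensitive.
    validate(Arrays.asList("c1", "C1"),
        new HashSet<>(Arrays.asList("a")),
        new HashSet<>(Arrays.asList("year")), false);
  }
}
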
http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/analysis/AlterTableChangeColStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/analysis/AlterTableChangeColStmt.java b/fe/src/main/java/com/cloudera/impala/analysis/AlterTableChangeColStmt.java
deleted file mode 100644
index c733ca0..0000000
--- a/fe/src/main/java/com/cloudera/impala/analysis/AlterTableChangeColStmt.java
+++ /dev/null
@@ -1,101 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.analysis;
-
-import org.apache.hadoop.hive.metastore.api.FieldSchema;
-
-import com.cloudera.impala.catalog.HBaseTable;
-import com.cloudera.impala.catalog.Table;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.thrift.TAlterTableChangeColParams;
-import com.cloudera.impala.thrift.TAlterTableParams;
-import com.cloudera.impala.thrift.TAlterTableType;
-import com.google.common.base.Preconditions;
-
-/**
- * Represents an ALTER TABLE CHANGE COLUMN colName newColDef statement.
- * Note: It would be fairly simple to reuse this class to support ALTER TABLE MODIFY
- * newColDef statements in the future by making colName optional.
- */
-public class AlterTableChangeColStmt extends AlterTableStmt {
-  private final String colName_;
-  private final ColumnDef newColDef_;
-
-  public AlterTableChangeColStmt(TableName tableName, String colName,
-      ColumnDef newColDef) {
-    super(tableName);
-    Preconditions.checkNotNull(newColDef);
-    Preconditions.checkState(colName != null && !colName.isEmpty());
-    colName_ = colName;
-    newColDef_ = newColDef;
-  }
-
-  public String getColName() { return colName_; }
-  public ColumnDef getNewColDef() { return newColDef_; }
-
-  @Override
-  public TAlterTableParams toThrift() {
-    TAlterTableParams params = super.toThrift();
-    params.setAlter_type(TAlterTableType.CHANGE_COLUMN);
-    TAlterTableChangeColParams colParams = new TAlterTableChangeColParams();
-    colParams.setCol_name(colName_);
-    colParams.setNew_col_def(newColDef_.toThrift());
-    params.setChange_col_params(colParams);
-    return params;
-  }
-
-  @Override
-  public void analyze(Analyzer analyzer) throws AnalysisException {
-    super.analyze(analyzer);
-    Table t = getTargetTable();
-    // TODO: Support column-level DDL on HBase tables. Requires updating the column
-    // mappings along with the table columns.
-    if (t instanceof HBaseTable) {
-      throw new AnalysisException("ALTER TABLE CHANGE COLUMN not currently supported " +
-          "on HBase tables.");
-    }
-    String tableName = getDb() + "." + getTbl();
-
-    // Verify there are no conflicts with partition columns.
-    for (FieldSchema fs: t.getMetaStoreTable().getPartitionKeys()) {
-      if (fs.getName().toLowerCase().equals(colName_.toLowerCase())) {
-        throw new AnalysisException("Cannot modify partition column: " + colName_);
-      }
-      if (fs.getName().toLowerCase().equals(newColDef_.getColName().toLowerCase())) {
-        throw new AnalysisException(
-            "Column name conflicts with existing partition column: " +
-            newColDef_.getColName());
-      }
-    }
-
-    // Verify the column being modified exists in the table
-    if (t.getColumn(colName_) == null) {
-      throw new AnalysisException(String.format(
-          "Column '%s' does not exist in table: %s", colName_, tableName));
-    }
-
-    // Check that the new column def's name is valid.
-    newColDef_.analyze();
-    // Verify that if the column name is being changed, the new name doesn't conflict
-    // with an existing column.
-    if (!colName_.toLowerCase().equals(newColDef_.getColName().toLowerCase()) &&
-        t.getColumn(newColDef_.getColName()) != null) {
-      throw new AnalysisException("Column already exists: " + newColDef_.getColName());
-    }
-  }
-}

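One readability note on the comparisons in the deleted analyze() above: for the ASCII identifiers involved here, a.toLowerCase().equals(b.toLowerCase()) behaves like the more direct String.equalsIgnoreCase(b), which avoids building two temporary strings. A two-line demonstration:

public class CaseCompareSketch {
  public static void main(String[] args) {
    String colName = "Year";
    String partitionKey = "year";
    // Both print true for ASCII identifiers; the second form avoids building
    // two temporary lower-cased strings.
    System.out.println(colName.toLowerCase().equals(partitionKey.toLowerCase()));
    System.out.println(colName.equalsIgnoreCase(partitionKey));
  }
}
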
http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/analysis/AlterTableDropColStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/analysis/AlterTableDropColStmt.java b/fe/src/main/java/com/cloudera/impala/analysis/AlterTableDropColStmt.java
deleted file mode 100644
index d7f5ab5..0000000
--- a/fe/src/main/java/com/cloudera/impala/analysis/AlterTableDropColStmt.java
+++ /dev/null
@@ -1,84 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.analysis;
-
-import org.apache.hadoop.hive.metastore.api.FieldSchema;
-
-import com.cloudera.impala.catalog.HBaseTable;
-import com.cloudera.impala.catalog.Table;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.thrift.TAlterTableDropColParams;
-import com.cloudera.impala.thrift.TAlterTableParams;
-import com.cloudera.impala.thrift.TAlterTableType;
-import com.google.common.base.Preconditions;
-
-/**
- * Represents an ALTER TABLE DROP COLUMN statement.
- * Note: Hive does not support this syntax for dropping columns, but it is
- * supported by MySQL.
- */
-public class AlterTableDropColStmt extends AlterTableStmt {
-  private final String colName_;
-
-  public AlterTableDropColStmt(TableName tableName, String colName) {
-    super(tableName);
-    Preconditions.checkState(colName != null && !colName.isEmpty());
-    colName_ = colName;
-  }
-
-  public String getColName() { return colName_; }
-
-  @Override
-  public TAlterTableParams toThrift() {
-    TAlterTableParams params = super.toThrift();
-    params.setAlter_type(TAlterTableType.DROP_COLUMN);
-    TAlterTableDropColParams dropColParams = new TAlterTableDropColParams(colName_);
-    params.setDrop_col_params(dropColParams);
-    return params;
-  }
-
-  @Override
-  public void analyze(Analyzer analyzer) throws AnalysisException {
-    super.analyze(analyzer);
-    Table t = getTargetTable();
-    // TODO: Support column-level DDL on HBase tables. Requires updating the column
-    // mappings along with the table columns.
-    if (t instanceof HBaseTable) {
-      throw new AnalysisException("ALTER TABLE DROP COLUMN not currently supported " +
-          "on HBase tables.");
-    }
-    String tableName = getDb() + "." + getTbl();
-
-    for (FieldSchema fs: t.getMetaStoreTable().getPartitionKeys()) {
-      if (fs.getName().toLowerCase().equals(colName_.toLowerCase())) {
-        throw new AnalysisException("Cannot drop partition column: " + fs.getName());
-      }
-    }
-
-    if (t.getColumns().size() - t.getMetaStoreTable().getPartitionKeysSize() <= 1) {
-      throw new AnalysisException(String.format(
-          "Cannot drop column '%s' from %s. Tables must contain at least 1 column.",
-          colName_, tableName));
-    }
-
-    if (t.getColumn(colName_) == null) {
-      throw new AnalysisException(String.format(
-          "Column '%s' does not exist in table: %s", colName_, tableName));
-    }
-  }
-}

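The guard that keeps at least one non-partition column in the deleted AlterTableDropColStmt boils down to simple arithmetic over column counts. A worked standalone version, with made-up counts:

public class DropColGuardSketch {
  // A table with totalCols columns, of which partitionKeys are partition
  // columns, must keep at least one non-partition column after the drop.
  static boolean canDropColumn(int totalCols, int partitionKeys) {
    return totalCols - partitionKeys > 1;
  }

  public static void main(String[] args) {
    System.out.println(canDropColumn(3, 1));  // true: one data column survives
    System.out.println(canDropColumn(2, 1));  // false: would drop the only one
  }
}
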
http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/analysis/AlterTableDropPartitionStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/analysis/AlterTableDropPartitionStmt.java b/fe/src/main/java/com/cloudera/impala/analysis/AlterTableDropPartitionStmt.java
deleted file mode 100644
index f8bc09c..0000000
--- a/fe/src/main/java/com/cloudera/impala/analysis/AlterTableDropPartitionStmt.java
+++ /dev/null
@@ -1,79 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.analysis;
-
-import com.cloudera.impala.authorization.Privilege;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.thrift.TAlterTableDropPartitionParams;
-import com.cloudera.impala.thrift.TAlterTableParams;
-import com.cloudera.impala.thrift.TAlterTableType;
-import com.google.common.base.Preconditions;
-
-/**
- * Represents an ALTER TABLE DROP PARTITION statement.
- */
-public class AlterTableDropPartitionStmt extends AlterTableStmt {
-  private final boolean ifExists_;
-  private final PartitionSpec partitionSpec_;
-
-  // Setting this value causes dropped partition(s) to be permanently
-  // deleted. For example, for HDFS tables it skips the trash mechanism.
-  private final boolean purgePartition_;
-
-  public AlterTableDropPartitionStmt(TableName tableName,
-      PartitionSpec partitionSpec, boolean ifExists, boolean purgePartition) {
-    super(tableName);
-    Preconditions.checkNotNull(partitionSpec);
-    partitionSpec_ = partitionSpec;
-    partitionSpec_.setTableName(tableName);
-    ifExists_ = ifExists;
-    purgePartition_ = purgePartition;
-  }
-
-  public boolean getIfExists() { return ifExists_; }
-
-  @Override
-  public String toSql() {
-    StringBuilder sb = new StringBuilder("ALTER TABLE " + getTbl());
-    sb.append(" DROP ");
-    if (ifExists_) sb.append("IF EXISTS ");
-    sb.append(partitionSpec_.toSql());
-    if (purgePartition_) sb.append(" PURGE");
-    return sb.toString();
-  }
-
-  @Override
-  public TAlterTableParams toThrift() {
-    TAlterTableParams params = super.toThrift();
-    params.setAlter_type(TAlterTableType.DROP_PARTITION);
-    TAlterTableDropPartitionParams dropPartParams = new TAlterTableDropPartitionParams();
-    dropPartParams.setPartition_spec(partitionSpec_.toThrift());
-    dropPartParams.setIf_exists(ifExists_);
-    dropPartParams.setPurge(purgePartition_);
-    params.setDrop_partition_params(dropPartParams);
-    return params;
-  }
-
-  @Override
-  public void analyze(Analyzer analyzer) throws AnalysisException {
-    super.analyze(analyzer);
-    if (!ifExists_) partitionSpec_.setPartitionShouldExist();
-    partitionSpec_.setPrivilegeRequirement(Privilege.ALTER);
-    partitionSpec_.analyze(analyzer);
-  }
-}

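The toSql() in the deleted AlterTableDropPartitionStmt renders the optional IF EXISTS and PURGE clauses around the partition spec. A standalone sketch of that rendering, with placeholder inputs:

public class DropPartitionToSqlSketch {
  // Follows the StringBuilder pattern of AlterTableDropPartitionStmt.toSql().
  static String toSql(String tbl, String partitionSpec, boolean ifExists,
      boolean purge) {
    StringBuilder sb = new StringBuilder("ALTER TABLE " + tbl);
    sb.append(" DROP ");
    if (ifExists) sb.append("IF EXISTS ");
    sb.append(partitionSpec);
    if (purge) sb.append(" PURGE");
    return sb.toString();
  }

  public static void main(String[] args) {
    System.out.println(toSql("t", "PARTITION (year=2016)", true, true));
    // -> ALTER TABLE t DROP IF EXISTS PARTITION (year=2016) PURGE
  }
}
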
http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/analysis/AlterTableOrViewRenameStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/analysis/AlterTableOrViewRenameStmt.java b/fe/src/main/java/com/cloudera/impala/analysis/AlterTableOrViewRenameStmt.java
deleted file mode 100644
index 009535c..0000000
--- a/fe/src/main/java/com/cloudera/impala/analysis/AlterTableOrViewRenameStmt.java
+++ /dev/null
@@ -1,90 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.analysis;
-
-import com.cloudera.impala.authorization.Privilege;
-import com.cloudera.impala.catalog.View;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.thrift.TAccessEvent;
-import com.cloudera.impala.thrift.TAlterTableOrViewRenameParams;
-import com.cloudera.impala.thrift.TAlterTableParams;
-import com.cloudera.impala.thrift.TAlterTableType;
-import com.cloudera.impala.thrift.TTableName;
-import com.google.common.base.Preconditions;
-
-/**
- * Represents an ALTER TABLE/VIEW RENAME statement.
- */
-public class AlterTableOrViewRenameStmt extends AlterTableStmt {
-  protected final TableName newTableName_;
-
-  // Set during analysis
-  protected String newDbName_;
-
-  // True if we are renaming a table. False if we are renaming a view.
-  protected final boolean renameTable_;
-
-  public AlterTableOrViewRenameStmt(TableName oldTableName, TableName newTableName,
-      boolean renameTable) {
-    super(oldTableName);
-    Preconditions.checkState(newTableName != null && !newTableName.isEmpty());
-    newTableName_ = newTableName;
-    renameTable_ = renameTable;
-  }
-
-  public String getNewTbl() {
-    return newTableName_.getTbl();
-  }
-
-  public String getNewDb() {
-    Preconditions.checkNotNull(newDbName_);
-    return newDbName_;
-  }
-
-  @Override
-  public TAlterTableParams toThrift() {
-    TAlterTableParams params = super.toThrift();
-    params.setAlter_type(
-        (renameTable_) ? TAlterTableType.RENAME_TABLE : TAlterTableType.RENAME_VIEW);
-    TAlterTableOrViewRenameParams renameParams =
-        new TAlterTableOrViewRenameParams(new TTableName(getNewDb(), getNewTbl()));
-    params.setRename_params(renameParams);
-    return params;
-  }
-
-  @Override
-  public void analyze(Analyzer analyzer) throws AnalysisException {
-    newTableName_.analyze();
-    table_ = analyzer.getTable(tableName_, Privilege.ALTER);
-    if (table_ instanceof View && renameTable_) {
-      throw new AnalysisException(String.format(
-          "ALTER TABLE not allowed on a view: %s", table_.getFullName()));
-    }
-    if (!(table_ instanceof View) && !renameTable_) {
-      throw new AnalysisException(String.format(
-          "ALTER VIEW not allowed on a table: %s", table_.getFullName()));
-    }
-    newDbName_ = analyzer.getTargetDbName(newTableName_);
-    if (analyzer.dbContainsTable(newDbName_, newTableName_.getTbl(), Privilege.CREATE)) {
-      throw new AnalysisException(Analyzer.TBL_ALREADY_EXISTS_ERROR_MSG +
-          String.format("%s.%s", newDbName_, getNewTbl()));
-    }
-    analyzer.addAccessEvent(new TAccessEvent(newDbName_ + "." + newTableName_.getTbl(),
-        table_.getCatalogObjectType(), Privilege.CREATE.toString()));
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/analysis/AlterTableRecoverPartitionsStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/analysis/AlterTableRecoverPartitionsStmt.java b/fe/src/main/java/com/cloudera/impala/analysis/AlterTableRecoverPartitionsStmt.java
deleted file mode 100644
index c7e796c..0000000
--- a/fe/src/main/java/com/cloudera/impala/analysis/AlterTableRecoverPartitionsStmt.java
+++ /dev/null
@@ -1,56 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.analysis;
-
-import com.cloudera.impala.catalog.HdfsTable;
-import com.cloudera.impala.common.AnalysisException;
-import com.cloudera.impala.thrift.TAlterTableParams;
-import com.cloudera.impala.thrift.TAlterTableType;
-
-/**
- * Represents an ALTER TABLE RECOVER PARTITIONS statement.
- */
-public class AlterTableRecoverPartitionsStmt extends AlterTableStmt {
-
-  public AlterTableRecoverPartitionsStmt(TableName tableName) {
-    super(tableName);
-  }
-
-  @Override
-  public TAlterTableParams toThrift() {
-    TAlterTableParams params = super.toThrift();
-    params.setAlter_type(TAlterTableType.RECOVER_PARTITIONS);
-    return params;
-  }
-
-  @Override
-  public void analyze(Analyzer analyzer) throws AnalysisException {
-    super.analyze(analyzer);
-
-    // Make sure the target table is HdfsTable.
-    if (!(table_ instanceof HdfsTable)) {
-      throw new AnalysisException("ALTER TABLE RECOVER PARTITIONS " +
-          "must target an HDFS table: " + tableName_);
-    }
-
-    // Make sure the target table is partitioned.
-    if (table_.getMetaStoreTable().getPartitionKeysSize() == 0) {
-      throw new AnalysisException("Table is not partitioned: " + tableName_);
-    }
-  }
-}
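
The two analyze() guards in the deleted AlterTableRecoverPartitionsStmt reduce to a type check plus a partitioned-ness check. A self-contained sketch of that pattern; the Table/HdfsTable/HBaseTable types here are hypothetical stand-ins for the catalog classes, not the real hierarchy.

public class RecoverPartitionsGuardSketch {
  // Hypothetical stand-ins for the catalog Table/HdfsTable hierarchy.
  interface Table { int getPartitionKeyCount(); }
  static final class HdfsTable implements Table {
    private final int partitionKeys;
    HdfsTable(int partitionKeys) { this.partitionKeys = partitionKeys; }
    public int getPartitionKeyCount() { return partitionKeys; }
  }
  static final class HBaseTable implements Table {
    public int getPartitionKeyCount() { return 0; }
  }

  // Same two guards as the deleted analyze(): the target must be an HDFS
  // table, and it must be partitioned.
  static void checkRecoverable(Table t) {
    if (!(t instanceof HdfsTable)) {
      throw new IllegalArgumentException(
          "ALTER TABLE RECOVER PARTITIONS must target an HDFS table");
    }
    if (t.getPartitionKeyCount() == 0) {
      throw new IllegalArgumentException("Table is not partitioned");
    }
  }

  public static void main(String[] args) {
    checkRecoverable(new HdfsTable(1));  // passes
    checkRecoverable(new HBaseTable());  // throws IllegalArgumentException
  }
}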